Column schema (string length ranges per column):

hip_filename    string (length 5–84)
hip_content     string (length 79–9.69M)
cuda_filename   string (length 4–83)
cuda_content    string (length 19–9.69M)
e82e59b85bb753814d14132cae2ca436d80020d4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/Tensor.cuh>
#include <faiss/gpu/utils/ThrustAllocator.cuh>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>

namespace faiss { namespace gpu {

// Calculates the total number of intermediate distances to consider
// for all queries
__global__ void
getResultLengths(Tensor<int, 2, true> topQueryToCentroid,
                 int* listLengths,
                 int totalSize,
                 Tensor<int, 2, true> length) {
  int linearThreadId = blockIdx.x * blockDim.x + threadIdx.x;
  if (linearThreadId >= totalSize) {
    return;
  }

  int nprobe = topQueryToCentroid.getSize(1);
  int queryId = linearThreadId / nprobe;
  int listId = linearThreadId % nprobe;

  int centroidId = topQueryToCentroid[queryId][listId];

  // Safety guard in case NaNs in input cause no list ID to be generated
  length[queryId][listId] = (centroidId != -1) ? listLengths[centroidId] : 0;
}

void runCalcListOffsets(Tensor<int, 2, true>& topQueryToCentroid,
                        thrust::device_vector<int>& listLengths,
                        Tensor<int, 2, true>& prefixSumOffsets,
                        Tensor<char, 1, true>& thrustMem,
                        hipStream_t stream) {
  FAISS_ASSERT(topQueryToCentroid.getSize(0) == prefixSumOffsets.getSize(0));
  FAISS_ASSERT(topQueryToCentroid.getSize(1) == prefixSumOffsets.getSize(1));

  int totalSize = topQueryToCentroid.numElements();

  int numThreads = ::min(totalSize, getMaxThreadsCurrentDevice());
  int numBlocks = utils::divUp(totalSize, numThreads);

  auto grid = dim3(numBlocks);
  auto block = dim3(numThreads);

  hipLaunchKernelGGL(( getResultLengths), dim3(grid), dim3(block), 0, stream,
      topQueryToCentroid,
      listLengths.data().get(),
      totalSize,
      prefixSumOffsets);
  CUDA_TEST_ERROR();

  // Prefix sum of the indices, so we know where the intermediate
  // results should be maintained

  // Thrust wants a place for its temporary allocations, so provide
  // one, so it won't call hipMalloc/Free
  GpuResourcesThrustAllocator alloc(thrustMem.data(),
                                    thrustMem.getSizeInBytes());

  thrust::inclusive_scan(thrust::hip::par(alloc).on(stream),
                         prefixSumOffsets.data(),
                         prefixSumOffsets.data() + totalSize,
                         prefixSumOffsets.data());
  CUDA_TEST_ERROR();
}

} } // namespace
e82e59b85bb753814d14132cae2ca436d80020d4.cu
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/utils/Tensor.cuh>
#include <faiss/gpu/utils/ThrustAllocator.cuh>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>

namespace faiss { namespace gpu {

// Calculates the total number of intermediate distances to consider
// for all queries
__global__ void
getResultLengths(Tensor<int, 2, true> topQueryToCentroid,
                 int* listLengths,
                 int totalSize,
                 Tensor<int, 2, true> length) {
  int linearThreadId = blockIdx.x * blockDim.x + threadIdx.x;
  if (linearThreadId >= totalSize) {
    return;
  }

  int nprobe = topQueryToCentroid.getSize(1);
  int queryId = linearThreadId / nprobe;
  int listId = linearThreadId % nprobe;

  int centroidId = topQueryToCentroid[queryId][listId];

  // Safety guard in case NaNs in input cause no list ID to be generated
  length[queryId][listId] = (centroidId != -1) ? listLengths[centroidId] : 0;
}

void runCalcListOffsets(Tensor<int, 2, true>& topQueryToCentroid,
                        thrust::device_vector<int>& listLengths,
                        Tensor<int, 2, true>& prefixSumOffsets,
                        Tensor<char, 1, true>& thrustMem,
                        cudaStream_t stream) {
  FAISS_ASSERT(topQueryToCentroid.getSize(0) == prefixSumOffsets.getSize(0));
  FAISS_ASSERT(topQueryToCentroid.getSize(1) == prefixSumOffsets.getSize(1));

  int totalSize = topQueryToCentroid.numElements();

  int numThreads = std::min(totalSize, getMaxThreadsCurrentDevice());
  int numBlocks = utils::divUp(totalSize, numThreads);

  auto grid = dim3(numBlocks);
  auto block = dim3(numThreads);

  getResultLengths<<<grid, block, 0, stream>>>(
      topQueryToCentroid,
      listLengths.data().get(),
      totalSize,
      prefixSumOffsets);
  CUDA_TEST_ERROR();

  // Prefix sum of the indices, so we know where the intermediate
  // results should be maintained

  // Thrust wants a place for its temporary allocations, so provide
  // one, so it won't call cudaMalloc/Free
  GpuResourcesThrustAllocator alloc(thrustMem.data(),
                                    thrustMem.getSizeInBytes());

  thrust::inclusive_scan(thrust::cuda::par(alloc).on(stream),
                         prefixSumOffsets.data(),
                         prefixSumOffsets.data() + totalSize,
                         prefixSumOffsets.data());
  CUDA_TEST_ERROR();
}

} } // namespace
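Comparing the .hip row above with its .cu counterpart shows what the hipify pass changed: a generated-file banner and #include "hip/hip_runtime.h" are prepended, CUDA names are rewritten to their HIP equivalents (cudaStream_t to hipStream_t, thrust::cuda::par to thrust::hip::par, std::min to ::min in this file), and the triple-chevron kernel launch becomes a hipLaunchKernelGGL call. The stand-alone sketch below is not taken from the dataset; it only illustrates the launch-syntax rewrite, and the kernel name fillKernel and the constant kN are hypothetical.

#include <hip/hip_runtime.h>

// Trivial kernel used only to demonstrate the two launch syntaxes.
__global__ void fillKernel(int* out, int value, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    out[i] = value;
  }
}

int main() {
  constexpr int kN = 256;
  int* d_out = nullptr;
  hipMalloc(&d_out, kN * sizeof(int));

  dim3 grid(1);
  dim3 block(kN);
  hipStream_t stream = 0;  // default stream

  // CUDA form, as it appears in the *.cu column:
  //   fillKernel<<<grid, block, 0, stream>>>(d_out, 42, kN);
  // HIP form emitted by hipify, as it appears in the *.hip column:
  hipLaunchKernelGGL(fillKernel, grid, block, 0, stream, d_out, 42, kN);

  hipStreamSynchronize(stream);
  hipFree(d_out);
  return 0;
}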
7466c6cb35268e1a68c3165e4ec8bb6bee951243.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <functional> #include <map> #include <vector> // #include "thrust/functional.h" // #include "thrust/sort.h" #include "caffe/common.hpp" #include "caffe/util/bbox_util.hpp" namespace caffe { template <typename Dtype> __host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox, const bool normalized) { if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) { // If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0. return Dtype(0.); } else { const Dtype width = bbox[2] - bbox[0]; const Dtype height = bbox[3] - bbox[1]; if (normalized) { return width * height; } else { // If bbox is not within range [0, 1]. return (width + 1) * (height + 1); } } } template __host__ __device__ float BBoxSizeGPU(const float* bbox, const bool normalized); template __host__ __device__ double BBoxSizeGPU(const double* bbox, const bool normalized); template <typename Dtype> __host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1, const Dtype* bbox2) { if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] || bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) { return Dtype(0.); } else { const Dtype inter_xmin = max(bbox1[0], bbox2[0]); const Dtype inter_ymin = max(bbox1[1], bbox2[1]); const Dtype inter_xmax = min(bbox1[2], bbox2[2]); const Dtype inter_ymax = min(bbox1[3], bbox2[3]); const Dtype inter_width = inter_xmax - inter_xmin; const Dtype inter_height = inter_ymax - inter_ymin; const Dtype inter_size = inter_width * inter_height; const Dtype bbox1_size = BBoxSizeGPU(bbox1); const Dtype bbox2_size = BBoxSizeGPU(bbox2); return inter_size / (bbox1_size + bbox2_size - inter_size); } } template __host__ __device__ float JaccardOverlapGPU(const float* bbox1, const float* bbox2); template __host__ __device__ double JaccardOverlapGPU(const double* bbox1, const double* bbox2); template <typename Dtype> __device__ Dtype Min(const Dtype x, const Dtype y) { return x < y ? x : y; } template <typename Dtype> __device__ Dtype Max(const Dtype x, const Dtype y) { return x > y ? x : y; } template <typename Dtype> __device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) { for (int i = 0; i < 4; ++i) { clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.)); } } template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox); template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox); template <typename Dtype> __global__ void DecodeBBoxesKernel(const int nthreads, const Dtype* loc_data, const Dtype* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, Dtype* bbox_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index % 4; const int c = (index / 4) % num_loc_classes; const int d = (index / 4 / num_loc_classes) % num_priors; if (!share_location && c == background_label_id) { // Ignore background class if not share_location. return; } const int pi = d * 4; const int vi = pi + num_priors * 4; if (code_type == PriorBoxParameter_CodeType_CORNER) { if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = prior_data[pi + i] + loc_data[index]; } else { // variance is encoded in bbox, we need to scale the offset accordingly. 
bbox_data[index] = prior_data[pi + i] + loc_data[index] * prior_data[vi + i]; } } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) { const Dtype p_xmin = prior_data[pi]; const Dtype p_ymin = prior_data[pi + 1]; const Dtype p_xmax = prior_data[pi + 2]; const Dtype p_ymax = prior_data[pi + 3]; const Dtype prior_width = p_xmax - p_xmin; const Dtype prior_height = p_ymax - p_ymin; const Dtype prior_center_x = (p_xmin + p_xmax) / 2.; const Dtype prior_center_y = (p_ymin + p_ymax) / 2.; const Dtype xmin = loc_data[index - i]; const Dtype ymin = loc_data[index - i + 1]; const Dtype xmax = loc_data[index - i + 2]; const Dtype ymax = loc_data[index - i + 3]; Dtype decode_bbox_center_x, decode_bbox_center_y; Dtype decode_bbox_width, decode_bbox_height; if (variance_encoded_in_target) { // variance is encoded in target, we simply need to retore the offset // predictions. decode_bbox_center_x = xmin * prior_width + prior_center_x; decode_bbox_center_y = ymin * prior_height + prior_center_y; decode_bbox_width = exp(xmax) * prior_width; decode_bbox_height = exp(ymax) * prior_height; } else { // variance is encoded in bbox, we need to scale the offset accordingly. decode_bbox_center_x = prior_data[vi] * xmin * prior_width + prior_center_x; decode_bbox_center_y = prior_data[vi + 1] * ymin * prior_height + prior_center_y; decode_bbox_width = exp(prior_data[vi + 2] * xmax) * prior_width; decode_bbox_height = exp(prior_data[vi + 3] * ymax) * prior_height; } switch (i) { case 0: bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.; break; case 1: bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.; break; case 2: bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.; break; case 3: bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.; break; } } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) { const Dtype p_xmin = prior_data[pi]; const Dtype p_ymin = prior_data[pi + 1]; const Dtype p_xmax = prior_data[pi + 2]; const Dtype p_ymax = prior_data[pi + 3]; const Dtype prior_width = p_xmax - p_xmin; const Dtype prior_height = p_ymax - p_ymin; Dtype p_size; if (i == 0 || i == 2) { p_size = prior_width; } else { p_size = prior_height; } if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size; } else { // variance is encoded in bbox, we need to scale the offset accordingly. bbox_data[index] = prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size; } } else { // Unknown code type. } if (clip_bbox) { bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.)); } } } template <typename Dtype> __global__ void CasRegDecodeBBoxesKernel(const int nthreads, const Dtype* loc_data, const Dtype* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, Dtype* bbox_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index % 4; const int c = (index / 4) % num_loc_classes; const int d = (index / 4 / num_loc_classes) % num_priors; if (!share_location && c == background_label_id) { // Ignore background class if not share_location. 
return; } const int pi = d * 4; const int vi = pi + num_priors * 4; if (code_type == PriorBoxParameter_CodeType_CORNER) { if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = bbox_data[index] + loc_data[index]; } else { // variance is encoded in bbox, we need to scale the offset accordingly. bbox_data[index] = bbox_data[index] + loc_data[index] * prior_data[vi + i]; } } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) { const Dtype p_xmin = bbox_data[index - i]; const Dtype p_ymin = bbox_data[index - i + 1]; const Dtype p_xmax = bbox_data[index - i + 2]; const Dtype p_ymax = bbox_data[index - i + 3]; const Dtype prior_width = p_xmax - p_xmin; const Dtype prior_height = p_ymax - p_ymin; const Dtype prior_center_x = (p_xmin + p_xmax) / 2.; const Dtype prior_center_y = (p_ymin + p_ymax) / 2.; const Dtype xmin = loc_data[index - i]; const Dtype ymin = loc_data[index - i + 1]; const Dtype xmax = loc_data[index - i + 2]; const Dtype ymax = loc_data[index - i + 3]; Dtype decode_bbox_center_x, decode_bbox_center_y; Dtype decode_bbox_width, decode_bbox_height; if (variance_encoded_in_target) { // variance is encoded in target, we simply need to retore the offset // predictions. decode_bbox_center_x = xmin * prior_width + prior_center_x; decode_bbox_center_y = ymin * prior_height + prior_center_y; decode_bbox_width = exp(xmax) * prior_width; decode_bbox_height = exp(ymax) * prior_height; } else { // variance is encoded in bbox, we need to scale the offset accordingly. decode_bbox_center_x = prior_data[vi] * xmin * prior_width + prior_center_x; decode_bbox_center_y = prior_data[vi + 1] * ymin * prior_height + prior_center_y; decode_bbox_width = exp(prior_data[vi + 2] * xmax) * prior_width; decode_bbox_height = exp(prior_data[vi + 3] * ymax) * prior_height; } switch (i) { case 0: bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.; break; case 1: bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.; break; case 2: bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.; break; case 3: bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.; break; } } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) { const Dtype p_xmin = bbox_data[index - i]; const Dtype p_ymin = bbox_data[index - i + 1]; const Dtype p_xmax = bbox_data[index - i + 2]; const Dtype p_ymax = bbox_data[index - i + 3]; const Dtype prior_width = p_xmax - p_xmin; const Dtype prior_height = p_ymax - p_ymin; Dtype p_size; if (i == 0 || i == 2) { p_size = prior_width; } else { p_size = prior_height; } if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = bbox_data[index] + loc_data[index] * p_size; } else { // variance is encoded in bbox, we need to scale the offset accordingly. bbox_data[index] = bbox_data[index] + loc_data[index] * prior_data[vi + i] * p_size; } } else { // Unknown code type. 
} if (clip_bbox) { bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.)); } } } template <typename Dtype> void DecodeBBoxesGPU(const int nthreads, const Dtype* loc_data, const Dtype* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, Dtype* bbox_data) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, loc_data, prior_data, code_type, variance_encoded_in_target, num_priors, share_location, num_loc_classes, background_label_id, clip_bbox, bbox_data); CUDA_POST_KERNEL_CHECK; } template void DecodeBBoxesGPU(const int nthreads, const float* loc_data, const float* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, float* bbox_data); template void DecodeBBoxesGPU(const int nthreads, const double* loc_data, const double* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, double* bbox_data); /************************************************************************************/ template <typename Dtype> void CasRegDecodeBBoxesGPU(const int nthreads, const Dtype* loc_data, const Dtype* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, Dtype* bbox_data, const Dtype* arm_loc_data) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, arm_loc_data, prior_data, code_type, variance_encoded_in_target, num_priors, share_location, num_loc_classes, background_label_id, clip_bbox, bbox_data); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( CasRegDecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, loc_data, prior_data, code_type, variance_encoded_in_target, num_priors, share_location, num_loc_classes, background_label_id, clip_bbox, bbox_data); CUDA_POST_KERNEL_CHECK; } template void CasRegDecodeBBoxesGPU(const int nthreads, const float* loc_data, const float* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, float* bbox_data, const float* arm_loc_data); template void CasRegDecodeBBoxesGPU(const int nthreads, const double* loc_data, const double* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, double* bbox_data, const double* arm_loc_data); /************************************************************************************/ template <typename Dtype> __global__ void PermuteDataKernel(const int nthreads, const Dtype* data, const int num_classes, const int num_data, const int num_dim, Dtype* new_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = 
index % num_dim; const int c = (index / num_dim) % num_classes; const int d = (index / num_dim / num_classes) % num_data; const int n = index / num_dim / num_classes / num_data; const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i; new_data[new_index] = data[index]; } } template <typename Dtype> void PermuteDataGPU(const int nthreads, const Dtype* data, const int num_classes, const int num_data, const int num_dim, Dtype* new_data) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PermuteDataKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, data, num_classes, num_data, num_dim, new_data); CUDA_POST_KERNEL_CHECK; } template void PermuteDataGPU(const int nthreads, const float* data, const int num_classes, const int num_data, const int num_dim, float* new_data); template void PermuteDataGPU(const int nthreads, const double* data, const int num_classes, const int num_data, const int num_dim, double* new_data); template <typename Dtype> __global__ void OSPermuteDataKernel(const int nthreads, const Dtype* data, const Dtype* arm_data, const int num_classes, const int num_data, const int num_dim, Dtype* new_data, float objectness_score) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index % num_dim; const int c = (index / num_dim) % num_classes; const int d = (index / num_dim / num_classes) % num_data; const int n = index / num_dim / num_classes / num_data; const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i; const int arm_index = ((n * num_data + d) * 2 + 1) * num_dim + i; if (arm_data[arm_index] < objectness_score) { if (c == 0) new_data[new_index] = 1.0; else new_data[new_index] = 0.0; } else { new_data[new_index] = data[index]; } } } template <typename Dtype> void OSPermuteDataGPU(const int nthreads, const Dtype* data, const Dtype* arm_data, const int num_classes, const int num_data, const int num_dim, Dtype* new_data, float objectness_score) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( OSPermuteDataKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, data, arm_data, num_classes, num_data, num_dim, new_data, objectness_score); CUDA_POST_KERNEL_CHECK; } template void OSPermuteDataGPU(const int nthreads, const float* data, const float* arm_data, const int num_classes, const int num_data, const int num_dim, float* new_data, float objectness_score); template void OSPermuteDataGPU(const int nthreads, const double* data, const double* arm_data, const int num_classes, const int num_data, const int num_dim, double* new_data, float objectness_score); template <typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] = channel_data[index] - channel_max[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { 
CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> void SoftMaxGPU(const Dtype* data, const int outer_num, const int channels, const int inner_num, Dtype* prob) { vector<int> shape(4, 1); shape[0] = outer_num; shape[1] = channels; shape[2] = inner_num; Blob<Dtype> scale(shape); Dtype* scale_data = scale.mutable_gpu_data(); int count = outer_num * channels * inner_num; // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. // compute max // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num, data, scale_data, prob); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, prob, prob); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, prob, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num, scale_data, prob); } template void SoftMaxGPU(const float* data, const int outer_num, const int channels, const int inner_num, float* prob); template void SoftMaxGPU(const double* data, const int outer_num, const int channels, const int inner_num, double* prob); template <typename Dtype> __global__ void ComputeOverlappedKernel(const int nthreads, const Dtype* bbox_data, const int num_bboxes, const int num_classes, const Dtype overlap_threshold, bool* overlapped_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int j = index % num_bboxes; const int i = (index / num_bboxes) % num_bboxes; if (i == j) { // Ignore same bbox. return; } const int c = (index / num_bboxes / num_bboxes) % num_classes; const int n = index / num_bboxes / num_bboxes / num_classes; // Compute overlap between i-th bbox and j-th bbox. 
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4; const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4; const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i, bbox_data + start_loc_j); if (overlap > overlap_threshold) { overlapped_data[index] = true; } } } template <typename Dtype> void ComputeOverlappedGPU(const int nthreads, const Dtype* bbox_data, const int num_bboxes, const int num_classes, const Dtype overlap_threshold, bool* overlapped_data) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ComputeOverlappedKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, num_bboxes, num_classes, overlap_threshold, overlapped_data); CUDA_POST_KERNEL_CHECK; } template void ComputeOverlappedGPU(const int nthreads, const float* bbox_data, const int num_bboxes, const int num_classes, const float overlap_threshold, bool* overlapped_data); template void ComputeOverlappedGPU(const int nthreads, const double* bbox_data, const int num_bboxes, const int num_classes, const double overlap_threshold, bool* overlapped_data); template <typename Dtype> __global__ void ComputeOverlappedByIdxKernel(const int nthreads, const Dtype* bbox_data, const Dtype overlap_threshold, const int* idx, const int num_idx, bool* overlapped_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int j = index % num_idx; const int i = (index / num_idx); if (i == j) { // Ignore same bbox. return; } // Compute overlap between i-th bbox and j-th bbox. const int start_loc_i = idx[i] * 4; const int start_loc_j = idx[j] * 4; const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i, bbox_data + start_loc_j); if (overlap > overlap_threshold) { overlapped_data[index] = true; } } } template <typename Dtype> void ComputeOverlappedByIdxGPU(const int nthreads, const Dtype* bbox_data, const Dtype overlap_threshold, const int* idx, const int num_idx, bool* overlapped_data) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ComputeOverlappedByIdxKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, overlap_threshold, idx, num_idx, overlapped_data); CUDA_POST_KERNEL_CHECK; } template void ComputeOverlappedByIdxGPU(const int nthreads, const float* bbox_data, const float overlap_threshold, const int* idx, const int num_idx, bool* overlapped_data); template void ComputeOverlappedByIdxGPU(const int nthreads, const double* bbox_data, const double overlap_threshold, const int* idx, const int num_idx, bool* overlapped_data); template <typename Dtype> void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data, const int num_bboxes, const float confidence_threshold, const int top_k, const float nms_threshold, vector<int>* indices) { // Keep part of detections whose scores are higher than confidence threshold. vector<int> idx; vector<Dtype> confidences; for (int i = 0; i < num_bboxes; ++i) { if (conf_data[i] > confidence_threshold) { idx.push_back(i); confidences.push_back(conf_data[i]); } } int num_remain = confidences.size(); if (num_remain == 0) { return; } // Sort detections based on score. // thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0], ////////////////////////////////////////////////// by jyz // thrust::greater<Dtype>()); ////////////////////////////////////////////////// by jyz if (top_k > -1 && top_k < num_remain) { num_remain = top_k; } // Compute overlap between remaining detections. 
Blob<int> idx_blob(1, 1, 1, num_remain); int* idx_data = idx_blob.mutable_cpu_data(); std::copy(idx.begin(), idx.begin() + num_remain, idx_data); Blob<bool> overlapped(1, 1, num_remain, num_remain); const int total_bboxes = overlapped.count(); bool* overlapped_data = overlapped.mutable_gpu_data(); ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold, idx_blob.gpu_data(), num_remain, overlapped_data); // Do non-maximum suppression based on overlapped results. const bool* overlapped_results = overlapped.cpu_data(); vector<int> selected_indices; ApplyNMS(overlapped_results, num_remain, &selected_indices); // Put back the selected information. for (int i = 0; i < selected_indices.size(); ++i) { indices->push_back(idx[selected_indices[i]]); } } template void ApplyNMSGPU(const float* bbox_data, const float* conf_data, const int num_bboxes, const float confidence_threshold, const int top_k, const float nms_threshold, vector<int>* indices); template void ApplyNMSGPU(const double* bbox_data, const double* conf_data, const int num_bboxes, const float confidence_threshold, const int top_k, const float nms_threshold, vector<int>* indices); template <typename Dtype> __global__ void GetDetectionsKernel(const int nthreads, const Dtype* bbox_data, const Dtype* conf_data, const int image_id, const int label, const int* indices, const bool clip_bbox, Dtype* detection_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int det_idx = indices[index]; detection_data[index * 7] = image_id; detection_data[index * 7 + 1] = label; detection_data[index * 7 + 2] = conf_data[det_idx]; if (clip_bbox) { ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3])); } else { for (int i = 0; i < 4; ++i) { detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i]; } } } } template <typename Dtype> void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data, const int image_id, const int label, const vector<int>& indices, const bool clip_bbox, Blob<Dtype>* detection_blob) { // Store selected indices in array. int num_det = indices.size(); if (num_det == 0) { return; } Blob<int> idx_blob(1, 1, 1, num_det); int* idx_data = idx_blob.mutable_cpu_data(); std::copy(indices.begin(), indices.end(), idx_data); // Prepare detection_blob. 
detection_blob->Reshape(1, 1, num_det, 7); Dtype* detection_data = detection_blob->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( GetDetectionsKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_det)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_det, bbox_data, conf_data, image_id, label, idx_blob.gpu_data(), clip_bbox, detection_data); CUDA_POST_KERNEL_CHECK; } template void GetDetectionsGPU(const float* bbox_data, const float* conf_data, const int image_id, const int label, const vector<int>& indices, const bool clip_bbox, Blob<float>* detection_blob); template void GetDetectionsGPU(const double* bbox_data, const double* conf_data, const int image_id, const int label, const vector<int>& indices, const bool clip_bbox, Blob<double>* detection_blob); template <typename Dtype> __global__ void ComputeConfLossKernel(const int nthreads, const Dtype* conf_data, const int num_preds_per_class, const int num_classes, const ConfLossType loss_type, const Dtype* match_data, Dtype* conf_loss_data) { CUDA_KERNEL_LOOP(index, nthreads) { int label = match_data[index]; int num = index / num_preds_per_class; int p = index % num_preds_per_class; int start_idx = (num * num_preds_per_class + p) * num_classes; Dtype loss = 0; if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) { // Compute softmax probability. Dtype prob = conf_data[start_idx + label]; loss = -log(Max(prob, Dtype(FLT_MIN))); } else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) { int target = 0; for (int c = 0; c < num_classes; ++c) { if (c == label) { target = 1; } else { target = 0; } Dtype input = conf_data[start_idx + c]; loss -= input * (target - (input >= 0)) - log(1 + exp(input - 2 * input * (input >= 0))); } } conf_loss_data[index] = loss; } } template <typename Dtype> void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num, const int num_preds_per_class, const int num_classes, const int background_label_id, const ConfLossType loss_type, const vector<map<int, vector<int> > >& all_match_indices, const map<int, vector<NormalizedBBox> >& all_gt_bboxes, vector<vector<float> >* all_conf_loss) { CHECK_LT(background_label_id, num_classes); Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1); Dtype* match_data = match_blob.mutable_cpu_data(); for (int i = 0; i < num; ++i) { const map<int, vector<int> >& match_indices = all_match_indices[i]; for (int p = 0; p < num_preds_per_class; ++p) { // Get the label index. int label = background_label_id; for (map<int, vector<int> >::const_iterator it = match_indices.begin(); it != match_indices.end(); ++it) { const vector<int>& match_index = it->second; CHECK_EQ(match_index.size(), num_preds_per_class); if (match_index[p] > -1) { CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end()); const vector<NormalizedBBox>& gt_bboxes = all_gt_bboxes.find(i)->second; CHECK_LT(match_index[p], gt_bboxes.size()); label = gt_bboxes[match_index[p]].label(); CHECK_GE(label, 0); CHECK_NE(label, background_label_id); CHECK_LT(label, num_classes); // A prior can only be matched to one gt bbox. break; } } match_data[i * num_preds_per_class + p] = label; } } // Get probability data. const Dtype* conf_gpu_data = conf_blob.gpu_data(); Blob<Dtype> prob_blob; prob_blob.ReshapeLike(conf_blob); if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) { Dtype* prob_gpu_data = prob_blob.mutable_gpu_data(); SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1, prob_gpu_data); conf_gpu_data = prob_blob.gpu_data(); } // Compute the loss. 
Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1); Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data(); const int num_threads = num * num_preds_per_class; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ComputeConfLossKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, conf_gpu_data, num_preds_per_class, num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data); // Save the loss. all_conf_loss->clear(); const Dtype* loss_data = conf_loss_blob.cpu_data(); for (int i = 0; i < num; ++i) { vector<float> conf_loss(loss_data, loss_data + num_preds_per_class); all_conf_loss->push_back(conf_loss); loss_data += num_preds_per_class; } } // Explicit initialization. template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num, const int num_preds_per_class, const int num_classes, const int background_label_id, const ConfLossType loss_type, const vector<map<int, vector<int> > >& all_match_indices, const map<int, vector<NormalizedBBox> >& all_gt_bboxes, vector<vector<float> >* all_conf_loss); template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num, const int num_preds_per_class, const int num_classes, const int background_label_id, const ConfLossType loss_type, const vector<map<int, vector<int> > >& all_match_indices, const map<int, vector<NormalizedBBox> >& all_gt_bboxes, vector<vector<float> >* all_conf_loss); } // namespace caffe
7466c6cb35268e1a68c3165e4ec8bb6bee951243.cu
#include <algorithm> #include <functional> #include <map> #include <vector> // #include "thrust/functional.h" // #include "thrust/sort.h" #include "caffe/common.hpp" #include "caffe/util/bbox_util.hpp" namespace caffe { template <typename Dtype> __host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox, const bool normalized) { if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) { // If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0. return Dtype(0.); } else { const Dtype width = bbox[2] - bbox[0]; const Dtype height = bbox[3] - bbox[1]; if (normalized) { return width * height; } else { // If bbox is not within range [0, 1]. return (width + 1) * (height + 1); } } } template __host__ __device__ float BBoxSizeGPU(const float* bbox, const bool normalized); template __host__ __device__ double BBoxSizeGPU(const double* bbox, const bool normalized); template <typename Dtype> __host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1, const Dtype* bbox2) { if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] || bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) { return Dtype(0.); } else { const Dtype inter_xmin = max(bbox1[0], bbox2[0]); const Dtype inter_ymin = max(bbox1[1], bbox2[1]); const Dtype inter_xmax = min(bbox1[2], bbox2[2]); const Dtype inter_ymax = min(bbox1[3], bbox2[3]); const Dtype inter_width = inter_xmax - inter_xmin; const Dtype inter_height = inter_ymax - inter_ymin; const Dtype inter_size = inter_width * inter_height; const Dtype bbox1_size = BBoxSizeGPU(bbox1); const Dtype bbox2_size = BBoxSizeGPU(bbox2); return inter_size / (bbox1_size + bbox2_size - inter_size); } } template __host__ __device__ float JaccardOverlapGPU(const float* bbox1, const float* bbox2); template __host__ __device__ double JaccardOverlapGPU(const double* bbox1, const double* bbox2); template <typename Dtype> __device__ Dtype Min(const Dtype x, const Dtype y) { return x < y ? x : y; } template <typename Dtype> __device__ Dtype Max(const Dtype x, const Dtype y) { return x > y ? x : y; } template <typename Dtype> __device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) { for (int i = 0; i < 4; ++i) { clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.)); } } template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox); template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox); template <typename Dtype> __global__ void DecodeBBoxesKernel(const int nthreads, const Dtype* loc_data, const Dtype* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, Dtype* bbox_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index % 4; const int c = (index / 4) % num_loc_classes; const int d = (index / 4 / num_loc_classes) % num_priors; if (!share_location && c == background_label_id) { // Ignore background class if not share_location. return; } const int pi = d * 4; const int vi = pi + num_priors * 4; if (code_type == PriorBoxParameter_CodeType_CORNER) { if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = prior_data[pi + i] + loc_data[index]; } else { // variance is encoded in bbox, we need to scale the offset accordingly. 
bbox_data[index] = prior_data[pi + i] + loc_data[index] * prior_data[vi + i]; } } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) { const Dtype p_xmin = prior_data[pi]; const Dtype p_ymin = prior_data[pi + 1]; const Dtype p_xmax = prior_data[pi + 2]; const Dtype p_ymax = prior_data[pi + 3]; const Dtype prior_width = p_xmax - p_xmin; const Dtype prior_height = p_ymax - p_ymin; const Dtype prior_center_x = (p_xmin + p_xmax) / 2.; const Dtype prior_center_y = (p_ymin + p_ymax) / 2.; const Dtype xmin = loc_data[index - i]; const Dtype ymin = loc_data[index - i + 1]; const Dtype xmax = loc_data[index - i + 2]; const Dtype ymax = loc_data[index - i + 3]; Dtype decode_bbox_center_x, decode_bbox_center_y; Dtype decode_bbox_width, decode_bbox_height; if (variance_encoded_in_target) { // variance is encoded in target, we simply need to retore the offset // predictions. decode_bbox_center_x = xmin * prior_width + prior_center_x; decode_bbox_center_y = ymin * prior_height + prior_center_y; decode_bbox_width = exp(xmax) * prior_width; decode_bbox_height = exp(ymax) * prior_height; } else { // variance is encoded in bbox, we need to scale the offset accordingly. decode_bbox_center_x = prior_data[vi] * xmin * prior_width + prior_center_x; decode_bbox_center_y = prior_data[vi + 1] * ymin * prior_height + prior_center_y; decode_bbox_width = exp(prior_data[vi + 2] * xmax) * prior_width; decode_bbox_height = exp(prior_data[vi + 3] * ymax) * prior_height; } switch (i) { case 0: bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.; break; case 1: bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.; break; case 2: bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.; break; case 3: bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.; break; } } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) { const Dtype p_xmin = prior_data[pi]; const Dtype p_ymin = prior_data[pi + 1]; const Dtype p_xmax = prior_data[pi + 2]; const Dtype p_ymax = prior_data[pi + 3]; const Dtype prior_width = p_xmax - p_xmin; const Dtype prior_height = p_ymax - p_ymin; Dtype p_size; if (i == 0 || i == 2) { p_size = prior_width; } else { p_size = prior_height; } if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size; } else { // variance is encoded in bbox, we need to scale the offset accordingly. bbox_data[index] = prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size; } } else { // Unknown code type. } if (clip_bbox) { bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.)); } } } template <typename Dtype> __global__ void CasRegDecodeBBoxesKernel(const int nthreads, const Dtype* loc_data, const Dtype* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, Dtype* bbox_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index % 4; const int c = (index / 4) % num_loc_classes; const int d = (index / 4 / num_loc_classes) % num_priors; if (!share_location && c == background_label_id) { // Ignore background class if not share_location. 
return; } const int pi = d * 4; const int vi = pi + num_priors * 4; if (code_type == PriorBoxParameter_CodeType_CORNER) { if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = bbox_data[index] + loc_data[index]; } else { // variance is encoded in bbox, we need to scale the offset accordingly. bbox_data[index] = bbox_data[index] + loc_data[index] * prior_data[vi + i]; } } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) { const Dtype p_xmin = bbox_data[index - i]; const Dtype p_ymin = bbox_data[index - i + 1]; const Dtype p_xmax = bbox_data[index - i + 2]; const Dtype p_ymax = bbox_data[index - i + 3]; const Dtype prior_width = p_xmax - p_xmin; const Dtype prior_height = p_ymax - p_ymin; const Dtype prior_center_x = (p_xmin + p_xmax) / 2.; const Dtype prior_center_y = (p_ymin + p_ymax) / 2.; const Dtype xmin = loc_data[index - i]; const Dtype ymin = loc_data[index - i + 1]; const Dtype xmax = loc_data[index - i + 2]; const Dtype ymax = loc_data[index - i + 3]; Dtype decode_bbox_center_x, decode_bbox_center_y; Dtype decode_bbox_width, decode_bbox_height; if (variance_encoded_in_target) { // variance is encoded in target, we simply need to retore the offset // predictions. decode_bbox_center_x = xmin * prior_width + prior_center_x; decode_bbox_center_y = ymin * prior_height + prior_center_y; decode_bbox_width = exp(xmax) * prior_width; decode_bbox_height = exp(ymax) * prior_height; } else { // variance is encoded in bbox, we need to scale the offset accordingly. decode_bbox_center_x = prior_data[vi] * xmin * prior_width + prior_center_x; decode_bbox_center_y = prior_data[vi + 1] * ymin * prior_height + prior_center_y; decode_bbox_width = exp(prior_data[vi + 2] * xmax) * prior_width; decode_bbox_height = exp(prior_data[vi + 3] * ymax) * prior_height; } switch (i) { case 0: bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.; break; case 1: bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.; break; case 2: bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.; break; case 3: bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.; break; } } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) { const Dtype p_xmin = bbox_data[index - i]; const Dtype p_ymin = bbox_data[index - i + 1]; const Dtype p_xmax = bbox_data[index - i + 2]; const Dtype p_ymax = bbox_data[index - i + 3]; const Dtype prior_width = p_xmax - p_xmin; const Dtype prior_height = p_ymax - p_ymin; Dtype p_size; if (i == 0 || i == 2) { p_size = prior_width; } else { p_size = prior_height; } if (variance_encoded_in_target) { // variance is encoded in target, we simply need to add the offset // predictions. bbox_data[index] = bbox_data[index] + loc_data[index] * p_size; } else { // variance is encoded in bbox, we need to scale the offset accordingly. bbox_data[index] = bbox_data[index] + loc_data[index] * prior_data[vi + i] * p_size; } } else { // Unknown code type. 
} if (clip_bbox) { bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.)); } } } template <typename Dtype> void DecodeBBoxesGPU(const int nthreads, const Dtype* loc_data, const Dtype* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, Dtype* bbox_data) { // NOLINT_NEXT_LINE(whitespace/operators) DecodeBBoxesKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, loc_data, prior_data, code_type, variance_encoded_in_target, num_priors, share_location, num_loc_classes, background_label_id, clip_bbox, bbox_data); CUDA_POST_KERNEL_CHECK; } template void DecodeBBoxesGPU(const int nthreads, const float* loc_data, const float* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, float* bbox_data); template void DecodeBBoxesGPU(const int nthreads, const double* loc_data, const double* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, double* bbox_data); /************************************************************************************/ template <typename Dtype> void CasRegDecodeBBoxesGPU(const int nthreads, const Dtype* loc_data, const Dtype* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, Dtype* bbox_data, const Dtype* arm_loc_data) { // NOLINT_NEXT_LINE(whitespace/operators) DecodeBBoxesKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, arm_loc_data, prior_data, code_type, variance_encoded_in_target, num_priors, share_location, num_loc_classes, background_label_id, clip_bbox, bbox_data); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) CasRegDecodeBBoxesKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, loc_data, prior_data, code_type, variance_encoded_in_target, num_priors, share_location, num_loc_classes, background_label_id, clip_bbox, bbox_data); CUDA_POST_KERNEL_CHECK; } template void CasRegDecodeBBoxesGPU(const int nthreads, const float* loc_data, const float* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, float* bbox_data, const float* arm_loc_data); template void CasRegDecodeBBoxesGPU(const int nthreads, const double* loc_data, const double* prior_data, const CodeType code_type, const bool variance_encoded_in_target, const int num_priors, const bool share_location, const int num_loc_classes, const int background_label_id, const bool clip_bbox, double* bbox_data, const double* arm_loc_data); /************************************************************************************/ template <typename Dtype> __global__ void PermuteDataKernel(const int nthreads, const Dtype* data, const int num_classes, const int num_data, const int num_dim, Dtype* new_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index % num_dim; const int c = (index / num_dim) % num_classes; const int d = (index / num_dim / num_classes) 
% num_data; const int n = index / num_dim / num_classes / num_data; const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i; new_data[new_index] = data[index]; } } template <typename Dtype> void PermuteDataGPU(const int nthreads, const Dtype* data, const int num_classes, const int num_data, const int num_dim, Dtype* new_data) { // NOLINT_NEXT_LINE(whitespace/operators) PermuteDataKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, data, num_classes, num_data, num_dim, new_data); CUDA_POST_KERNEL_CHECK; } template void PermuteDataGPU(const int nthreads, const float* data, const int num_classes, const int num_data, const int num_dim, float* new_data); template void PermuteDataGPU(const int nthreads, const double* data, const int num_classes, const int num_data, const int num_dim, double* new_data); template <typename Dtype> __global__ void OSPermuteDataKernel(const int nthreads, const Dtype* data, const Dtype* arm_data, const int num_classes, const int num_data, const int num_dim, Dtype* new_data, float objectness_score) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index % num_dim; const int c = (index / num_dim) % num_classes; const int d = (index / num_dim / num_classes) % num_data; const int n = index / num_dim / num_classes / num_data; const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i; const int arm_index = ((n * num_data + d) * 2 + 1) * num_dim + i; if (arm_data[arm_index] < objectness_score) { if (c == 0) new_data[new_index] = 1.0; else new_data[new_index] = 0.0; } else { new_data[new_index] = data[index]; } } } template <typename Dtype> void OSPermuteDataGPU(const int nthreads, const Dtype* data, const Dtype* arm_data, const int num_classes, const int num_data, const int num_dim, Dtype* new_data, float objectness_score) { // NOLINT_NEXT_LINE(whitespace/operators) OSPermuteDataKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, data, arm_data, num_classes, num_data, num_dim, new_data, objectness_score); CUDA_POST_KERNEL_CHECK; } template void OSPermuteDataGPU(const int nthreads, const float* data, const float* arm_data, const int num_classes, const int num_data, const int num_dim, float* new_data, float objectness_score); template void OSPermuteDataGPU(const int nthreads, const double* data, const double* arm_data, const int num_classes, const int num_data, const int num_dim, double* new_data, float objectness_score); template <typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] = channel_data[index] - channel_max[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int 
spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> void SoftMaxGPU(const Dtype* data, const int outer_num, const int channels, const int inner_num, Dtype* prob) { vector<int> shape(4, 1); shape[0] = outer_num; shape[1] = channels; shape[2] = inner_num; Blob<Dtype> scale(shape); Dtype* scale_data = scale.mutable_gpu_data(); int count = outer_num * channels * inner_num; // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. // compute max // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num), CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num, data, scale_data, prob); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, prob, prob); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num), CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, prob, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num, scale_data, prob); } template void SoftMaxGPU(const float* data, const int outer_num, const int channels, const int inner_num, float* prob); template void SoftMaxGPU(const double* data, const int outer_num, const int channels, const int inner_num, double* prob); template <typename Dtype> __global__ void ComputeOverlappedKernel(const int nthreads, const Dtype* bbox_data, const int num_bboxes, const int num_classes, const Dtype overlap_threshold, bool* overlapped_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int j = index % num_bboxes; const int i = (index / num_bboxes) % num_bboxes; if (i == j) { // Ignore same bbox. return; } const int c = (index / num_bboxes / num_bboxes) % num_classes; const int n = index / num_bboxes / num_bboxes / num_classes; // Compute overlap between i-th bbox and j-th bbox. 
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4; const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4; const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i, bbox_data + start_loc_j); if (overlap > overlap_threshold) { overlapped_data[index] = true; } } } template <typename Dtype> void ComputeOverlappedGPU(const int nthreads, const Dtype* bbox_data, const int num_bboxes, const int num_classes, const Dtype overlap_threshold, bool* overlapped_data) { // NOLINT_NEXT_LINE(whitespace/operators) ComputeOverlappedKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, num_bboxes, num_classes, overlap_threshold, overlapped_data); CUDA_POST_KERNEL_CHECK; } template void ComputeOverlappedGPU(const int nthreads, const float* bbox_data, const int num_bboxes, const int num_classes, const float overlap_threshold, bool* overlapped_data); template void ComputeOverlappedGPU(const int nthreads, const double* bbox_data, const int num_bboxes, const int num_classes, const double overlap_threshold, bool* overlapped_data); template <typename Dtype> __global__ void ComputeOverlappedByIdxKernel(const int nthreads, const Dtype* bbox_data, const Dtype overlap_threshold, const int* idx, const int num_idx, bool* overlapped_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int j = index % num_idx; const int i = (index / num_idx); if (i == j) { // Ignore same bbox. return; } // Compute overlap between i-th bbox and j-th bbox. const int start_loc_i = idx[i] * 4; const int start_loc_j = idx[j] * 4; const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i, bbox_data + start_loc_j); if (overlap > overlap_threshold) { overlapped_data[index] = true; } } } template <typename Dtype> void ComputeOverlappedByIdxGPU(const int nthreads, const Dtype* bbox_data, const Dtype overlap_threshold, const int* idx, const int num_idx, bool* overlapped_data) { // NOLINT_NEXT_LINE(whitespace/operators) ComputeOverlappedByIdxKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, overlap_threshold, idx, num_idx, overlapped_data); CUDA_POST_KERNEL_CHECK; } template void ComputeOverlappedByIdxGPU(const int nthreads, const float* bbox_data, const float overlap_threshold, const int* idx, const int num_idx, bool* overlapped_data); template void ComputeOverlappedByIdxGPU(const int nthreads, const double* bbox_data, const double overlap_threshold, const int* idx, const int num_idx, bool* overlapped_data); template <typename Dtype> void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data, const int num_bboxes, const float confidence_threshold, const int top_k, const float nms_threshold, vector<int>* indices) { // Keep part of detections whose scores are higher than confidence threshold. vector<int> idx; vector<Dtype> confidences; for (int i = 0; i < num_bboxes; ++i) { if (conf_data[i] > confidence_threshold) { idx.push_back(i); confidences.push_back(conf_data[i]); } } int num_remain = confidences.size(); if (num_remain == 0) { return; } // Sort detections based on score. // thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0], ////////////////////////////////////////////////// by jyz // thrust::greater<Dtype>()); ////////////////////////////////////////////////// by jyz if (top_k > -1 && top_k < num_remain) { num_remain = top_k; } // Compute overlap between remaining detections. 
Blob<int> idx_blob(1, 1, 1, num_remain); int* idx_data = idx_blob.mutable_cpu_data(); std::copy(idx.begin(), idx.begin() + num_remain, idx_data); Blob<bool> overlapped(1, 1, num_remain, num_remain); const int total_bboxes = overlapped.count(); bool* overlapped_data = overlapped.mutable_gpu_data(); ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold, idx_blob.gpu_data(), num_remain, overlapped_data); // Do non-maximum suppression based on overlapped results. const bool* overlapped_results = overlapped.cpu_data(); vector<int> selected_indices; ApplyNMS(overlapped_results, num_remain, &selected_indices); // Put back the selected information. for (int i = 0; i < selected_indices.size(); ++i) { indices->push_back(idx[selected_indices[i]]); } } template void ApplyNMSGPU(const float* bbox_data, const float* conf_data, const int num_bboxes, const float confidence_threshold, const int top_k, const float nms_threshold, vector<int>* indices); template void ApplyNMSGPU(const double* bbox_data, const double* conf_data, const int num_bboxes, const float confidence_threshold, const int top_k, const float nms_threshold, vector<int>* indices); template <typename Dtype> __global__ void GetDetectionsKernel(const int nthreads, const Dtype* bbox_data, const Dtype* conf_data, const int image_id, const int label, const int* indices, const bool clip_bbox, Dtype* detection_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int det_idx = indices[index]; detection_data[index * 7] = image_id; detection_data[index * 7 + 1] = label; detection_data[index * 7 + 2] = conf_data[det_idx]; if (clip_bbox) { ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3])); } else { for (int i = 0; i < 4; ++i) { detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i]; } } } } template <typename Dtype> void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data, const int image_id, const int label, const vector<int>& indices, const bool clip_bbox, Blob<Dtype>* detection_blob) { // Store selected indices in array. int num_det = indices.size(); if (num_det == 0) { return; } Blob<int> idx_blob(1, 1, 1, num_det); int* idx_data = idx_blob.mutable_cpu_data(); std::copy(indices.begin(), indices.end(), idx_data); // Prepare detection_blob. 
detection_blob->Reshape(1, 1, num_det, 7); Dtype* detection_data = detection_blob->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) GetDetectionsKernel<Dtype><<<CAFFE_GET_BLOCKS(num_det), CAFFE_CUDA_NUM_THREADS>>>(num_det, bbox_data, conf_data, image_id, label, idx_blob.gpu_data(), clip_bbox, detection_data); CUDA_POST_KERNEL_CHECK; } template void GetDetectionsGPU(const float* bbox_data, const float* conf_data, const int image_id, const int label, const vector<int>& indices, const bool clip_bbox, Blob<float>* detection_blob); template void GetDetectionsGPU(const double* bbox_data, const double* conf_data, const int image_id, const int label, const vector<int>& indices, const bool clip_bbox, Blob<double>* detection_blob); template <typename Dtype> __global__ void ComputeConfLossKernel(const int nthreads, const Dtype* conf_data, const int num_preds_per_class, const int num_classes, const ConfLossType loss_type, const Dtype* match_data, Dtype* conf_loss_data) { CUDA_KERNEL_LOOP(index, nthreads) { int label = match_data[index]; int num = index / num_preds_per_class; int p = index % num_preds_per_class; int start_idx = (num * num_preds_per_class + p) * num_classes; Dtype loss = 0; if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) { // Compute softmax probability. Dtype prob = conf_data[start_idx + label]; loss = -log(Max(prob, Dtype(FLT_MIN))); } else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) { int target = 0; for (int c = 0; c < num_classes; ++c) { if (c == label) { target = 1; } else { target = 0; } Dtype input = conf_data[start_idx + c]; loss -= input * (target - (input >= 0)) - log(1 + exp(input - 2 * input * (input >= 0))); } } conf_loss_data[index] = loss; } } template <typename Dtype> void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num, const int num_preds_per_class, const int num_classes, const int background_label_id, const ConfLossType loss_type, const vector<map<int, vector<int> > >& all_match_indices, const map<int, vector<NormalizedBBox> >& all_gt_bboxes, vector<vector<float> >* all_conf_loss) { CHECK_LT(background_label_id, num_classes); Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1); Dtype* match_data = match_blob.mutable_cpu_data(); for (int i = 0; i < num; ++i) { const map<int, vector<int> >& match_indices = all_match_indices[i]; for (int p = 0; p < num_preds_per_class; ++p) { // Get the label index. int label = background_label_id; for (map<int, vector<int> >::const_iterator it = match_indices.begin(); it != match_indices.end(); ++it) { const vector<int>& match_index = it->second; CHECK_EQ(match_index.size(), num_preds_per_class); if (match_index[p] > -1) { CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end()); const vector<NormalizedBBox>& gt_bboxes = all_gt_bboxes.find(i)->second; CHECK_LT(match_index[p], gt_bboxes.size()); label = gt_bboxes[match_index[p]].label(); CHECK_GE(label, 0); CHECK_NE(label, background_label_id); CHECK_LT(label, num_classes); // A prior can only be matched to one gt bbox. break; } } match_data[i * num_preds_per_class + p] = label; } } // Get probability data. const Dtype* conf_gpu_data = conf_blob.gpu_data(); Blob<Dtype> prob_blob; prob_blob.ReshapeLike(conf_blob); if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) { Dtype* prob_gpu_data = prob_blob.mutable_gpu_data(); SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1, prob_gpu_data); conf_gpu_data = prob_blob.gpu_data(); } // Compute the loss. 
Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1); Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data(); const int num_threads = num * num_preds_per_class; // NOLINT_NEXT_LINE(whitespace/operators) ComputeConfLossKernel<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(num_threads, conf_gpu_data, num_preds_per_class, num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data); // Save the loss. all_conf_loss->clear(); const Dtype* loss_data = conf_loss_blob.cpu_data(); for (int i = 0; i < num; ++i) { vector<float> conf_loss(loss_data, loss_data + num_preds_per_class); all_conf_loss->push_back(conf_loss); loss_data += num_preds_per_class; } } // Explicit initialization. template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num, const int num_preds_per_class, const int num_classes, const int background_label_id, const ConfLossType loss_type, const vector<map<int, vector<int> > >& all_match_indices, const map<int, vector<NormalizedBBox> >& all_gt_bboxes, vector<vector<float> >* all_conf_loss); template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num, const int num_preds_per_class, const int num_classes, const int background_label_id, const ConfLossType loss_type, const vector<map<int, vector<int> > >& all_match_indices, const map<int, vector<NormalizedBBox> >& all_gt_bboxes, vector<vector<float> >* all_conf_loss); } // namespace caffe
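The LOGISTIC branch of ComputeConfLossKernel above accumulates the numerically stable form of the sigmoid cross-entropy, -x*(t - [x>=0]) + log(1 + exp(x - 2*x*[x>=0])). The host-only sketch below is illustrative and not part of the original file (the function name and test logits are made up); it mirrors that formula and compares it against the naive expression, which is handy as a CPU reference when unit-testing the kernel.

#include <cmath>
#include <cstdio>

// Stable form of -[t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x))],
// matching the LOGISTIC branch of ComputeConfLossKernel.
static double logistic_loss_ref(double x, int t) {
  const double s = (x >= 0.0) ? 1.0 : 0.0;
  return -x * (t - s) + std::log(1.0 + std::exp(x - 2.0 * x * s));
}

int main() {
  const double logits[] = {-4.0, -0.5, 0.0, 0.5, 4.0};
  for (double x : logits) {
    for (int t = 0; t <= 1; ++t) {
      const double p = 1.0 / (1.0 + std::exp(-x));  // sigmoid
      const double naive = -(t * std::log(p) + (1 - t) * std::log(1.0 - p));
      std::printf("x=%5.2f t=%d stable=%.6f naive=%.6f\n",
                  x, t, logistic_loss_ref(x, t), naive);
    }
  }
  return 0;
}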
48a1b602786888add9e55d49794b7080bf617049.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_zerores_kernel; int xdim0_zerores_kernel_h = -1; __constant__ int xdim1_zerores_kernel; int xdim1_zerores_kernel_h = -1; __constant__ int xdim2_zerores_kernel; int xdim2_zerores_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #define OPS_ACC0(x) (x) #define OPS_ACC1(x) (x) #define OPS_ACC2(x) (x) //user function __device__ void zerores_kernel_gpu(double *rho_res, double *rhou_res, double *rhoE_res) { rho_res[OPS_ACC0(0)] = 0.0; rhou_res[OPS_ACC1(0)] = 0.0; rhoE_res[OPS_ACC2(0)] = 0.0; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 __global__ void ops_zerores_kernel( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, int size0 ){ int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1; arg1 += idx_x * 1*1; arg2 += idx_x * 1*1; if (idx_x < size0) { zerores_kernel_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_zerores_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_zerores_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2,"zerores_kernel"); OPS_kernels[2].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[1]; int end[1]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<1; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<1; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; if (xdim0 != xdim0_zerores_kernel_h || xdim1 != xdim1_zerores_kernel_h || xdim2 != xdim2_zerores_kernel_h) { hipMemcpyToSymbol( xdim0_zerores_kernel, &xdim0, sizeof(int) ); xdim0_zerores_kernel_h = xdim0; hipMemcpyToSymbol( xdim1_zerores_kernel, &xdim1, sizeof(int) ); xdim1_zerores_kernel_h = xdim1; hipMemcpyToSymbol( xdim2_zerores_kernel, &xdim2, sizeof(int) ); xdim2_zerores_kernel_h = xdim2; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1); dim3 tblock(OPS_block_size_x,1,1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? 
args[2].dat->type_size : args[2].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); p_a[2] = (char *)args[2].data_d + base2; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0) hipLaunchKernelGGL(( ops_zerores_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2],x_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[2].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg2); } } #ifdef OPS_LAZY void ops_par_loop_zerores_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for ( int i=0; i<2; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->function = ops_par_loop_zerores_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(2,"zerores_kernel"); } ops_enqueue_kernel(desc); } #endif
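The host stub above sizes its grid with a plain ceiling division, dim3 grid((x_size-1)/OPS_block_size_x + 1, 1, 1), and the kernel guards the overshoot with idx_x < size0. The standalone sketch below is illustrative only (zero_out, n and block_size are made-up names and values, not part of OPS); it shows the same launch pattern in isolation.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void zero_out(double *a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) a[i] = 0.0;  // same guard as idx_x < size0 above
}

int main() {
  const int n = 1000, block_size = 256;
  double *d_a = nullptr;
  hipMalloc((void **)&d_a, n * sizeof(double));
  // Ceiling division: enough blocks to cover n elements even when
  // block_size does not divide n exactly.
  const int grid_size = (n - 1) / block_size + 1;
  hipLaunchKernelGGL(zero_out, dim3(grid_size), dim3(block_size), 0, 0, d_a, n);
  hipDeviceSynchronize();
  hipFree(d_a);
  return 0;
}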
48a1b602786888add9e55d49794b7080bf617049.cu
// // auto-generated by ops.py // __constant__ int xdim0_zerores_kernel; int xdim0_zerores_kernel_h = -1; __constant__ int xdim1_zerores_kernel; int xdim1_zerores_kernel_h = -1; __constant__ int xdim2_zerores_kernel; int xdim2_zerores_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #define OPS_ACC0(x) (x) #define OPS_ACC1(x) (x) #define OPS_ACC2(x) (x) //user function __device__ void zerores_kernel_gpu(double *rho_res, double *rhou_res, double *rhoE_res) { rho_res[OPS_ACC0(0)] = 0.0; rhou_res[OPS_ACC1(0)] = 0.0; rhoE_res[OPS_ACC2(0)] = 0.0; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 __global__ void ops_zerores_kernel( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, int size0 ){ int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1; arg1 += idx_x * 1*1; arg2 += idx_x * 1*1; if (idx_x < size0) { zerores_kernel_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_zerores_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_zerores_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2,"zerores_kernel"); OPS_kernels[2].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[1]; int end[1]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<1; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<1; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; if (xdim0 != xdim0_zerores_kernel_h || xdim1 != xdim1_zerores_kernel_h || xdim2 != xdim2_zerores_kernel_h) { cudaMemcpyToSymbol( xdim0_zerores_kernel, &xdim0, sizeof(int) ); xdim0_zerores_kernel_h = xdim0; cudaMemcpyToSymbol( xdim1_zerores_kernel, &xdim1, sizeof(int) ); xdim1_zerores_kernel_h = xdim1; cudaMemcpyToSymbol( xdim2_zerores_kernel, &xdim2, sizeof(int) ); xdim2_zerores_kernel_h = xdim2; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1); dim3 tblock(OPS_block_size_x,1,1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? 
args[2].dat->type_size : args[2].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); p_a[2] = (char *)args[2].data_d + base2; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0) ops_zerores_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2],x_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[2].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg2); } } #ifdef OPS_LAZY void ops_par_loop_zerores_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for ( int i=0; i<2; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->function = ops_par_loop_zerores_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(2,"zerores_kernel"); } ops_enqueue_kernel(desc); } #endif
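The xdim*_zerores_kernel_h globals above cache, on the host, the value last uploaded to the matching __constant__ symbol, so cudaMemcpyToSymbol is issued only when a dat size actually changes between launches. Below is a minimal standalone sketch of that idiom (illustrative; the symbol and helper names are made up, not OPS API).

#include <cuda_runtime.h>
#include <cstdio>

__constant__ int xdim0;   // device-side constant read by kernels
static int xdim0_h = -1;  // host-side record of the last uploaded value

__global__ void read_xdim(int *out) { *out = xdim0; }

static void set_xdim_if_changed(int xdim) {
  if (xdim != xdim0_h) {  // skip the copy when the value is unchanged
    cudaMemcpyToSymbol(xdim0, &xdim, sizeof(int));
    xdim0_h = xdim;
  }
}

int main() {
  int *d_out = nullptr;
  cudaMalloc((void **)&d_out, sizeof(int));
  set_xdim_if_changed(128);  // first call uploads
  set_xdim_if_changed(128);  // second call is a no-op
  read_xdim<<<1, 1>>>(d_out);
  int h_out = 0;
  cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
  std::printf("xdim0 on device = %d\n", h_out);
  cudaFree(d_out);
  return 0;
}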
bbab9942349e5b13f509523ea8d76dc4547ec296.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <numeric> #include <stdlib.h> using namespace std; static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define BUF_LEN 8 #define N 2 class B { public: __device__ virtual unsigned long f1(unsigned int hash) {return 0;} __device__ virtual unsigned long f2(unsigned int hash) {return 0;} __device__ virtual unsigned long f3(unsigned int hash) {return 0;} __device__ virtual unsigned long f4(unsigned int hash) {return 0;} }; class D:public B { public: __device__ __noinline__ unsigned long f1(unsigned int hash); __device__ __noinline__ unsigned long f2(unsigned int hash); __device__ __noinline__ unsigned long f3(unsigned int hash); __device__ __noinline__ unsigned long f4(unsigned int hash); }; __device__ __noinline__ unsigned long D::f1(unsigned int hash) {return hash;} __device__ __noinline__ unsigned long D::f2(unsigned int hash) {return 2*hash;} __device__ __noinline__ unsigned long D::f3(unsigned int hash) {return 3*hash;} __device__ __noinline__ unsigned long D::f4(unsigned int hash) {return 4*hash;} __device__ __noinline__ unsigned long secret() { printf("Hello Admin!\n"); return 0x9999999999999999; } __device__ unsigned long *buf; __device__ __noinline__ unsigned long unsafe(unsigned long *input,unsigned int len) { unsigned long res=0; unsigned long hash=5381; if(blockIdx.x==0) buf=(unsigned long *)malloc(sizeof(unsigned long)*BUF_LEN); //unsigned long *buf1=(unsigned long *)malloc(sizeof(unsigned long)*BUF_LEN); D *objD=new D; printf("blockIdx.x %d, buf %p\n",blockIdx.x,buf); //printf("threadid %d, buf1 %p\n",threadIdx.x,buf1); printf("blockIdx.x %d, secret %p\n",blockIdx.x,secret); printf("blockIdx.x %d, objD %p\n",blockIdx.x,objD); if(blockIdx.x==0) for(int i=0;i<len;i++) { buf[i]=input[i]; } for(int i=0;i<BUF_LEN;i++) hash=((hash<<5)+hash)+buf[i]; res=objD->f1(hash); res=objD->f2(res); res=objD->f3(res); res=objD->f4(res); return res; } __global__ void test_kernel(unsigned long *hashes,unsigned long *input,unsigned int len,int *admin) { unsigned long my_hash; int idx=blockDim.x*blockIdx.x+threadIdx.x; if(*admin) { my_hash=secret(); printf("%p\n",secret); } else my_hash=unsafe(input+(30*idx),len); hashes[idx]=my_hash; } int main() { unsigned long input[100]; unsigned int len=8; int admin=0; unsigned long hashes[N]; unsigned long *dev_hashes; unsigned long *dev_input; int *dev_admin; for(int i=0;i<4;i++) input[i]=0x1b8000001b8; for(int i=4;i<30;i++) input[i]=0x50263f920; for(int i=30;i<34;i++) input[i]=0; for(int i=34;i<60;i++) input[i]=0; CUDA_CHECK_RETURN(hipMalloc((void**)&dev_hashes,N*sizeof(unsigned long))); CUDA_CHECK_RETURN(hipMalloc((void**)&dev_input,100*sizeof(unsigned long))); CUDA_CHECK_RETURN(hipMalloc((void**)&dev_admin,sizeof(int))); CUDA_CHECK_RETURN(hipMemcpy(dev_input,input,100*sizeof(unsigned long),hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(dev_admin,&admin,sizeof( int),hipMemcpyHostToDevice)); // cout<<"start!"<<endl; //0x50263f920 //0x1c0000001c0 hipLaunchKernelGGL(( test_kernel), dim3(N),dim3(1), 0, 0, dev_hashes,dev_input, len,dev_admin); // CUDA_CHECK_RETURN(hipMemcpy(&hashes,dev_hashes,N*sizeof(unsigned long),hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipMemcpy(&hashes,dev_hashes,N*sizeof(unsigned long),hipMemcpyDeviceToHost)); for(int i=0;i<N;i++) { printf("%lx\n",hashes[i]); } cout<<endl; 
hipFree(dev_hashes); hipFree(dev_admin); hipFree(dev_input); //CUDA_CHECK_RETURN(hipFree(dev_hashes)); //CUDA_CHECK_RETURN(hipFree(dev_admin)); //CUDA_CHECK_RETURN(hipFree(dev_len)); //CUDA_CHECK_RETURN(hipFree(dev_input)); return 0; } static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err) { if (err == hipSuccess) return; std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }
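The hashing loop in unsafe() above is the classic djb2 scheme: seed 5381, then hash = hash*33 + element. The host-only sketch below is illustrative (the sample input is made up) and computes only the raw hash, before the kernel passes it through the virtual calls objD->f1..f4, so it serves as a reference for that part of the computation.

#include <cstdio>

// djb2 over 64-bit elements, mirroring the loop in unsafe():
//   hash = ((hash << 5) + hash) + buf[i]   ==   hash * 33 + buf[i]
static unsigned long djb2(const unsigned long *data, int n) {
  unsigned long hash = 5381;
  for (int i = 0; i < n; ++i)
    hash = ((hash << 5) + hash) + data[i];
  return hash;
}

int main() {
  const unsigned long sample[8] = {1, 2, 3, 4, 5, 6, 7, 8};  // BUF_LEN elements
  std::printf("djb2 = %lx\n", djb2(sample, 8));
  return 0;
}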
bbab9942349e5b13f509523ea8d76dc4547ec296.cu
#include <stdio.h> #include <iostream> #include <numeric> #include <stdlib.h> using namespace std; static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define BUF_LEN 8 #define N 2 class B { public: __device__ virtual unsigned long f1(unsigned int hash) {return 0;} __device__ virtual unsigned long f2(unsigned int hash) {return 0;} __device__ virtual unsigned long f3(unsigned int hash) {return 0;} __device__ virtual unsigned long f4(unsigned int hash) {return 0;} }; class D:public B { public: __device__ __noinline__ unsigned long f1(unsigned int hash); __device__ __noinline__ unsigned long f2(unsigned int hash); __device__ __noinline__ unsigned long f3(unsigned int hash); __device__ __noinline__ unsigned long f4(unsigned int hash); }; __device__ __noinline__ unsigned long D::f1(unsigned int hash) {return hash;} __device__ __noinline__ unsigned long D::f2(unsigned int hash) {return 2*hash;} __device__ __noinline__ unsigned long D::f3(unsigned int hash) {return 3*hash;} __device__ __noinline__ unsigned long D::f4(unsigned int hash) {return 4*hash;} __device__ __noinline__ unsigned long secret() { printf("Hello Admin!\n"); return 0x9999999999999999; } __device__ unsigned long *buf; __device__ __noinline__ unsigned long unsafe(unsigned long *input,unsigned int len) { unsigned long res=0; unsigned long hash=5381; if(blockIdx.x==0) buf=(unsigned long *)malloc(sizeof(unsigned long)*BUF_LEN); //unsigned long *buf1=(unsigned long *)malloc(sizeof(unsigned long)*BUF_LEN); D *objD=new D; printf("blockIdx.x %d, buf %p\n",blockIdx.x,buf); //printf("threadid %d, buf1 %p\n",threadIdx.x,buf1); printf("blockIdx.x %d, secret %p\n",blockIdx.x,secret); printf("blockIdx.x %d, objD %p\n",blockIdx.x,objD); if(blockIdx.x==0) for(int i=0;i<len;i++) { buf[i]=input[i]; } for(int i=0;i<BUF_LEN;i++) hash=((hash<<5)+hash)+buf[i]; res=objD->f1(hash); res=objD->f2(res); res=objD->f3(res); res=objD->f4(res); return res; } __global__ void test_kernel(unsigned long *hashes,unsigned long *input,unsigned int len,int *admin) { unsigned long my_hash; int idx=blockDim.x*blockIdx.x+threadIdx.x; if(*admin) { my_hash=secret(); printf("%p\n",secret); } else my_hash=unsafe(input+(30*idx),len); hashes[idx]=my_hash; } int main() { unsigned long input[100]; unsigned int len=8; int admin=0; unsigned long hashes[N]; unsigned long *dev_hashes; unsigned long *dev_input; int *dev_admin; for(int i=0;i<4;i++) input[i]=0x1b8000001b8; for(int i=4;i<30;i++) input[i]=0x50263f920; for(int i=30;i<34;i++) input[i]=0; for(int i=34;i<60;i++) input[i]=0; CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_hashes,N*sizeof(unsigned long))); CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_input,100*sizeof(unsigned long))); CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_admin,sizeof(int))); CUDA_CHECK_RETURN(cudaMemcpy(dev_input,input,100*sizeof(unsigned long),cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(dev_admin,&admin,sizeof( int),cudaMemcpyHostToDevice)); // cout<<"start!"<<endl; //0x50263f920 //0x1c0000001c0 test_kernel<<<N,1>>>(dev_hashes,dev_input, len,dev_admin); // CUDA_CHECK_RETURN(cudaMemcpy(&hashes,dev_hashes,N*sizeof(unsigned long),cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(&hashes,dev_hashes,N*sizeof(unsigned long),cudaMemcpyDeviceToHost)); for(int i=0;i<N;i++) { printf("%lx\n",hashes[i]); } cout<<endl; cudaFree(dev_hashes); cudaFree(dev_admin); cudaFree(dev_input); //CUDA_CHECK_RETURN(cudaFree(dev_hashes)); 
//CUDA_CHECK_RETURN(cudaFree(dev_admin)); //CUDA_CHECK_RETURN(cudaFree(dev_len)); //CUDA_CHECK_RETURN(cudaFree(dev_input)); return 0; } static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }
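CheckCudaErrorAux and CUDA_CHECK_RETURN above report the failing statement together with file and line, but the kernel launch itself and the final cudaFree calls are left unchecked. The standalone sketch below is illustrative (macro and kernel names are made up); it shows the same pattern extended to launches via cudaGetLastError and cudaDeviceSynchronize.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CUDA_CHECK(call)                                                   \
  do {                                                                     \
    cudaError_t err_ = (call);                                             \
    if (err_ != cudaSuccess) {                                             \
      std::fprintf(stderr, "%s failed at %s:%d: %s\n", #call, __FILE__,    \
                   __LINE__, cudaGetErrorString(err_));                    \
      std::exit(1);                                                        \
    }                                                                      \
  } while (0)

__global__ void dummy(int *p) { *p = 42; }

int main() {
  int *d = nullptr;
  CUDA_CHECK(cudaMalloc((void **)&d, sizeof(int)));
  dummy<<<1, 1>>>(d);
  CUDA_CHECK(cudaGetLastError());       // catches launch-configuration errors
  CUDA_CHECK(cudaDeviceSynchronize());  // catches errors raised during execution
  CUDA_CHECK(cudaFree(d));
  std::puts("all CUDA calls succeeded");
  return 0;
}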
9f821f52ce0065de1e38aee94df245ea914c4760.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <THH/THHGeneral.h> #include <THH/THHNumerics.cuh> #include <THH/THHAtomics.cuh> // for gpuAtomicAdd #include <c10/util/Exception.h> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int start_index(int a, int b, int c) { return (int)::floor((float)(a * c) / b); } __device__ inline int end_index(int a, int b, int c) { return (int)::ceil((float)((a + 1) * c) / b); } // 5d tensor B x D x T x H x W // All kernels view batch dim B and dim D as collapsed. /* * Description: * this function adaptively average pools an input 5D tensor along dimensions * 2, 3, and 4 5D input, 5D output * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragepool( scalar_t *input, scalar_t *output, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW, int64_t offsetZ) { // iterates on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // input offset by slice/feature and earliest relevant frame/time scalar_t *input_dt = input + d*istrideD + istartT*istrideT; // output offset by slice/feature and frame/time scalar_t *output_dt = output + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling from corresponding input pixels scalar_t *ptr_input = input_dt + istartH*istrideH + istartW*istrideW; scalar_t *ptr_output = output_dt + oh*osizeW + ow; accscalar_t sum = static_cast<accscalar_t>(0); int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { scalar_t val = ptr_input[ih*istrideH + iw*istrideW]; sum += static_cast<accscalar_t>(val); } } ptr_input += istrideT; // next input frame } // Update output const accscalar_t divide_factor = static_cast<accscalar_t>(kT * kH * kW); *ptr_output = static_cast<scalar_t>(sum / divide_factor); } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragepool_loop( scalar_t *input_data, scalar_t *output_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = ::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); hipLaunchKernelGGL(( adaptiveaveragepool<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragegradinput( scalar_t *gradInput, scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on input pixels int it, ih, iw; // compute offsets based on thread/block ID int istartH = blockIdx.y * blockDim.y + threadIdx.y; int iendH = isizeH; int istepH = gridDim.y * blockDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // select input plane int64_t i_plane = blockIdx.x + offsetZ; it = i_plane % isizeT; // output frame/time int d = i_plane / isizeT; // slice/feature // output frame/time range is fixed. int ostartT = start_index(it, isizeT, osizeT); int oendT = end_index(it, isizeT, osizeT); // gradInput offset by slice/feature and frame/time. scalar_t *gradInput_dt = gradInput + i_plane*isizeH*isizeW; // gradOutput offset by slice/feature and earliest relevant frame/time scalar_t *gradOutput_dt = gradOutput + (d*osizeT + ostartT)*osizeH*osizeW; // For all input pixels... 
for (ih = istartH; ih < iendH; ih += istepH) { int ostartH = start_index(ih, isizeH, osizeH); int oendH = end_index(ih, isizeH, osizeH); for (iw = istartW; iw < iendW; iw += istepW) { int ostartW = start_index(iw, isizeW, osizeW); int oendW = end_index(iw, isizeW, osizeW); // Compute the gradients from corresponding output pixels scalar_t *ptr_gradInput = gradInput_dt + ih*isizeW + iw; scalar_t *ptr_gradOutput = gradOutput_dt; // for all relevant output pixels int ot, oh, ow; for (ot = ostartT; ot < oendT; ++ot) { int kT = end_index(ot, osizeT, isizeT) - start_index(ot, osizeT, isizeT); for (oh = ostartH; oh < oendH; ++oh) { int kH = end_index(oh, osizeH, isizeH) - start_index(oh, osizeH, isizeH); for (ow = ostartW; ow < oendW; ++ow) { int kW = end_index(ow, osizeW, isizeW) - start_index(ow, osizeW, isizeW); const accscalar_t divide_factor = kW * kH * kT; accscalar_t grad_delta = static_cast<accscalar_t>(ptr_gradOutput[oh*isizeW + ow] / divide_factor); *ptr_gradInput += static_cast<scalar_t>(grad_delta); } } ptr_gradOutput += osizeH*osizeW; // next output frame } } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragegradinput_loop( scalar_t *gradInput_data, scalar_t *gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = ::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); hipLaunchKernelGGL(( adaptiveaveragegradinput<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). * * (uses atomic add) * */ template <typename scalar_t> __global__ void atomicadaptiveaveragegradinput( scalar_t *gradInput, scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // output slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // gradInput offset by slice/feature and earliest relevant frame/time scalar_t *gradInput_nt = gradInput + (d*isizeT + istartT)*isizeH*isizeW; // gradOutput offset by slice/feature and frame/time scalar_t *gradOutput_nt = gradOutput + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients from corresponding input pixels scalar_t *ptr_gradInput = gradInput_nt + istartH*isizeW + istartW; scalar_t *ptr_gradOutput = gradOutput_nt + oh*osizeW + ow; scalar_t grad_delta = *ptr_gradOutput / kT / kH / kW; int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { gpuAtomicAdd(&(ptr_gradInput[ih*isizeW + iw]), grad_delta); } } ptr_gradInput += isizeH*isizeW; // next input frame } } } } template <typename scalar_t> void atomicadaptiveaveragegradinput_loop( scalar_t* gradInput_data, scalar_t* gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); int blocksH = ::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); hipLaunchKernelGGL(( atomicadaptiveaveragegradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } // 5D tensor B x D x T x H x w void adaptive_avg_pool3d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef& output_size) { TensorArg output_arg{output, "output", 1}; TensorArg input_arg{input_, "input_", 2}; checkAllSameGPU("adaptive_avg_pool3d_cuda", {output_arg, input_arg}); for (int64_t i = 0; i < input_.ndimension(); i++) { TORCH_CHECK( input_.size(i) > 0, "adaptive_avg_pool3d_cuda(): expected input to have non-empty spatial dimensions, " "but input has sizes ", input_.sizes(), " with dimension ", i, " being empty"); } TORCH_CHECK( (input_.ndimension() == 4 || input_.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); // the jit sometimes passes output_size.size() == 1 TORCH_CHECK( output_size.size() == 1 || output_size.size() == 3, "adaptive_avg_pool3d: internal error: output_size.size() must be 1 or 3"); int64_t osizeT = output_size[0]; int64_t osizeH = output_size[1]; int64_t osizeW = output_size[2]; int64_t sizeD, isizeT, isizeH, isizeW; int64_t istrideD, istrideT, istrideH, istrideW; int64_t totalZ; const Tensor& input = input_.ndimension() == 4 ? 
input_ : input_.contiguous(); if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); istrideD = input.stride(0); istrideT = input.stride(1); istrideH = input.stride(2); istrideW = input.stride(3); output.resize_({sizeD, osizeT, osizeH, osizeW}); totalZ = sizeD * osizeT; } else { int64_t sizeB = input.size(0); sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); istrideD = input.stride(1); istrideT = input.stride(2); istrideH = input.stride(3); istrideW = input.stride(4); output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW}); totalZ = sizeB * sizeD * osizeT; } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); adaptiveaveragepool_loop<scalar_t, accscalar_t>( input_data, output_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW); }); } void adaptive_avg_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput_, "gradOutput_", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( "adaptive_avg_pool3d_out_cuda", {grad_input_arg, grad_output_arg, input_arg}); const Tensor gradOutput = gradOutput_.contiguous(); gradInput.resize_as_(input); gradInput.zero_(); int64_t sizeD, isizeT, isizeH, isizeW; int64_t osizeT, osizeH, osizeW; int64_t totalZ; if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); osizeT = gradOutput.size(1); osizeH = gradOutput.size(2); osizeW = gradOutput.size(3); } else { sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); osizeT = gradOutput.size(2); osizeH = gradOutput.size(3); osizeW = gradOutput.size(4); } bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0); if (input.ndimension() == 4) { totalZ = atomic ? sizeD * osizeT : sizeD * isizeT; } else { int sizeB = input.size(0); totalZ = atomic ? 
sizeB * sizeD * osizeT : sizeB * sizeD * isizeT; } if (atomic) { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>(); atomicadaptiveaveragegradinput_loop( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>(); adaptiveaveragegradinput_loop<scalar_t, accscalar_t>( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } } } // namespace Tensor& adaptive_avg_pool3d_out_cuda(const Tensor& input, IntArrayRef output_size, Tensor& output) { adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor adaptive_avg_pool3d_cuda( const Tensor& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor& adaptive_avg_pool3d_backward_out_cuda(const Tensor& gradOutput_, const Tensor& input, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_out_cuda"); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } Tensor adaptive_avg_pool3d_backward_cuda( const Tensor& gradOutput_, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } } // namespace native } // namespace at
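Each *_loop helper above walks totalZ (the collapsed batch x feature x frame planes) in chunks of at most 65535, because that is the largest gridDim.x this launch scheme issues per call; offsetZ tells the kernel which plane the current chunk starts at. The much-reduced standalone sketch below is illustrative only (the kernel, sizes, and names are made up, and the blocksH/grid.y dimension used above is omitted); it isolates just the chunking pattern.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void touch_plane(float *planes, int plane_elems, long long offsetZ) {
  const long long plane = blockIdx.x + offsetZ;  // global plane index
  const int i = threadIdx.x;
  if (i < plane_elems) planes[plane * plane_elems + i] += 1.0f;
}

int main() {
  const long long totalZ = 100000;  // more planes than one launch covers here
  const int plane_elems = 64;
  float *d = nullptr;
  hipMalloc((void **)&d, totalZ * plane_elems * sizeof(float));
  hipMemset(d, 0, totalZ * plane_elems * sizeof(float));
  long long remaining = totalZ, offsetZ = 0;
  while (remaining > 0) {
    const int blocks = remaining > 65535 ? 65535 : (int)remaining;
    hipLaunchKernelGGL(touch_plane, dim3(blocks), dim3(plane_elems), 0, 0,
                       d, plane_elems, offsetZ);
    remaining -= 65535;
    offsetZ += 65535;
  }
  hipDeviceSynchronize();
  hipFree(d);
  return 0;
}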
9f821f52ce0065de1e38aee94df245ea914c4760.cu
#include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <THC/THCGeneral.h> #include <THC/THCNumerics.cuh> #include <THC/THCAtomics.cuh> // for gpuAtomicAdd #include <c10/util/Exception.h> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int start_index(int a, int b, int c) { return (int)std::floor((float)(a * c) / b); } __device__ inline int end_index(int a, int b, int c) { return (int)std::ceil((float)((a + 1) * c) / b); } // 5d tensor B x D x T x H x W // All kernels view batch dim B and dim D as collapsed. /* * Description: * this function adaptively average pools an input 5D tensor along dimensions * 2, 3, and 4 5D input, 5D output * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragepool( scalar_t *input, scalar_t *output, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW, int64_t offsetZ) { // iterates on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // input offset by slice/feature and earliest relevant frame/time scalar_t *input_dt = input + d*istrideD + istartT*istrideT; // output offset by slice/feature and frame/time scalar_t *output_dt = output + o_plane*osizeH*osizeW; // For all output pixels... for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling from corresponding input pixels scalar_t *ptr_input = input_dt + istartH*istrideH + istartW*istrideW; scalar_t *ptr_output = output_dt + oh*osizeW + ow; accscalar_t sum = static_cast<accscalar_t>(0); int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { scalar_t val = ptr_input[ih*istrideH + iw*istrideW]; sum += static_cast<accscalar_t>(val); } } ptr_input += istrideT; // next input frame } // Update output const accscalar_t divide_factor = static_cast<accscalar_t>(kT * kH * kW); *ptr_output = static_cast<scalar_t>(sum / divide_factor); } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragepool_loop( scalar_t *input_data, scalar_t *output_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = std::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 
65535 : totalZ, blocksH); adaptiveaveragepool<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragegradinput( scalar_t *gradInput, scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on input pixels int it, ih, iw; // compute offsets based on thread/block ID int istartH = blockIdx.y * blockDim.y + threadIdx.y; int iendH = isizeH; int istepH = gridDim.y * blockDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // select input plane int64_t i_plane = blockIdx.x + offsetZ; it = i_plane % isizeT; // output frame/time int d = i_plane / isizeT; // slice/feature // output frame/time range is fixed. int ostartT = start_index(it, isizeT, osizeT); int oendT = end_index(it, isizeT, osizeT); // gradInput offset by slice/feature and frame/time. scalar_t *gradInput_dt = gradInput + i_plane*isizeH*isizeW; // gradOutput offset by slice/feature and earliest relevant frame/time scalar_t *gradOutput_dt = gradOutput + (d*osizeT + ostartT)*osizeH*osizeW; // For all input pixels... for (ih = istartH; ih < iendH; ih += istepH) { int ostartH = start_index(ih, isizeH, osizeH); int oendH = end_index(ih, isizeH, osizeH); for (iw = istartW; iw < iendW; iw += istepW) { int ostartW = start_index(iw, isizeW, osizeW); int oendW = end_index(iw, isizeW, osizeW); // Compute the gradients from corresponding output pixels scalar_t *ptr_gradInput = gradInput_dt + ih*isizeW + iw; scalar_t *ptr_gradOutput = gradOutput_dt; // for all relevant output pixels int ot, oh, ow; for (ot = ostartT; ot < oendT; ++ot) { int kT = end_index(ot, osizeT, isizeT) - start_index(ot, osizeT, isizeT); for (oh = ostartH; oh < oendH; ++oh) { int kH = end_index(oh, osizeH, isizeH) - start_index(oh, osizeH, isizeH); for (ow = ostartW; ow < oendW; ++ow) { int kW = end_index(ow, osizeW, isizeW) - start_index(ow, osizeW, isizeW); const accscalar_t divide_factor = kW * kH * kT; accscalar_t grad_delta = static_cast<accscalar_t>(ptr_gradOutput[oh*isizeW + ow] / divide_factor); *ptr_gradInput += static_cast<scalar_t>(grad_delta); } } ptr_gradOutput += osizeH*osizeW; // next output frame } } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragegradinput_loop( scalar_t *gradInput_data, scalar_t *gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = std::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); adaptiveaveragegradinput<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). 
* * (uses atomic add) * */ template <typename scalar_t> __global__ void atomicadaptiveaveragegradinput( scalar_t *gradInput, scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // output slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // gradInput offset by slice/feature and earliest relevant frame/time scalar_t *gradInput_nt = gradInput + (d*isizeT + istartT)*isizeH*isizeW; // gradOutput offset by slice/feature and frame/time scalar_t *gradOutput_nt = gradOutput + o_plane*osizeH*osizeW; // For all output pixels... for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients from corresponding input pixels scalar_t *ptr_gradInput = gradInput_nt + istartH*isizeW + istartW; scalar_t *ptr_gradOutput = gradOutput_nt + oh*osizeW + ow; scalar_t grad_delta = *ptr_gradOutput / kT / kH / kW; int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { gpuAtomicAdd(&(ptr_gradInput[ih*isizeW + iw]), grad_delta); } } ptr_gradInput += isizeH*isizeW; // next input frame } } } } template <typename scalar_t> void atomicadaptiveaveragegradinput_loop( scalar_t* gradInput_data, scalar_t* gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); int blocksH = std::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 
65535 : totalZ, blocksH); atomicadaptiveaveragegradinput<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } // 5D tensor B x D x T x H x w void adaptive_avg_pool3d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef& output_size) { TensorArg output_arg{output, "output", 1}; TensorArg input_arg{input_, "input_", 2}; checkAllSameGPU("adaptive_avg_pool3d_cuda", {output_arg, input_arg}); for (int64_t i = 0; i < input_.ndimension(); i++) { TORCH_CHECK( input_.size(i) > 0, "adaptive_avg_pool3d_cuda(): expected input to have non-empty spatial dimensions, " "but input has sizes ", input_.sizes(), " with dimension ", i, " being empty"); } TORCH_CHECK( (input_.ndimension() == 4 || input_.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); // the jit sometimes passes output_size.size() == 1 TORCH_CHECK( output_size.size() == 1 || output_size.size() == 3, "adaptive_avg_pool3d: internal error: output_size.size() must be 1 or 3"); int64_t osizeT = output_size[0]; int64_t osizeH = output_size[1]; int64_t osizeW = output_size[2]; int64_t sizeD, isizeT, isizeH, isizeW; int64_t istrideD, istrideT, istrideH, istrideW; int64_t totalZ; const Tensor& input = input_.ndimension() == 4 ? input_ : input_.contiguous(); if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); istrideD = input.stride(0); istrideT = input.stride(1); istrideH = input.stride(2); istrideW = input.stride(3); output.resize_({sizeD, osizeT, osizeH, osizeW}); totalZ = sizeD * osizeT; } else { int64_t sizeB = input.size(0); sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); istrideD = input.stride(1); istrideT = input.stride(2); istrideH = input.stride(3); istrideW = input.stride(4); output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW}); totalZ = sizeB * sizeD * osizeT; } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); adaptiveaveragepool_loop<scalar_t, accscalar_t>( input_data, output_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW); }); } void adaptive_avg_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput_, "gradOutput_", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( "adaptive_avg_pool3d_out_cuda", {grad_input_arg, grad_output_arg, input_arg}); const Tensor gradOutput = gradOutput_.contiguous(); gradInput.resize_as_(input); gradInput.zero_(); int64_t sizeD, isizeT, isizeH, isizeW; int64_t osizeT, osizeH, osizeW; int64_t totalZ; if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); osizeT = gradOutput.size(1); osizeH = gradOutput.size(2); osizeW = gradOutput.size(3); } else { sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); osizeT = gradOutput.size(2); osizeH = gradOutput.size(3); osizeW = gradOutput.size(4); } bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT 
!= 0); if (input.ndimension() == 4) { totalZ = atomic ? sizeD * osizeT : sizeD * isizeT; } else { int sizeB = input.size(0); totalZ = atomic ? sizeB * sizeD * osizeT : sizeB * sizeD * isizeT; } if (atomic) { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>(); atomicadaptiveaveragegradinput_loop( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>(); adaptiveaveragegradinput_loop<scalar_t, accscalar_t>( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } } } // namespace Tensor& adaptive_avg_pool3d_out_cuda(const Tensor& input, IntArrayRef output_size, Tensor& output) { adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor adaptive_avg_pool3d_cuda( const Tensor& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor& adaptive_avg_pool3d_backward_out_cuda(const Tensor& gradOutput_, const Tensor& input, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_out_cuda"); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } Tensor adaptive_avg_pool3d_backward_cuda( const Tensor& gradOutput_, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } } // namespace native } // namespace at
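start_index()/end_index() above define the (possibly overlapping) input window that each output element averages over: along an axis of input length isize and output length osize, output index o covers [floor(o*isize/osize), ceil((o+1)*isize/osize)). The host-only sketch below mirrors those helpers and prints the bins for made-up sizes; the parameter names are renamed for clarity and are illustrative only.

#include <cmath>
#include <cstdio>

static int start_index(int o, int osize, int isize) {
  return (int)std::floor((float)(o * isize) / osize);
}
static int end_index(int o, int osize, int isize) {
  return (int)std::ceil((float)((o + 1) * isize) / osize);
}

int main() {
  const int isize = 10, osize = 4;  // illustrative sizes only
  for (int o = 0; o < osize; ++o) {
    const int s = start_index(o, osize, isize);
    const int e = end_index(o, osize, isize);
    std::printf("output %d averages input [%d, %d)  k=%d\n", o, s, e, e - s);
  }
  return 0;
}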
abee72b87b34f4478d2c4df78b8ed5ffc44cb724.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define DEBUG 0 #include"pub.h" #include<hiprand/hiprand.h> #include<hipfft.h> #include"random.h" #include"gtensorb.h" #include"dynamics.h" #include"dynamics_mart.h" using namespace GS_NS; using namespace DATA_NS; int Dynamics_mart::Initialize(){ ///////////////////////////////////////////////////////// //para setting should be finished before or within this function string ss; ss=(*Vars)["gridsize"]; if (ss!="") ss>>nx>>ny>>nz>>dx>>dy>>dz; <<<<<<< HEAD DeltaTime =0.01f; (*Vars)["deltatime"]>>=DeltaTime; TransitionTemperature=450.0f; (*Vars)["transitiontemperature"]>>=TransitionTemperature; weightGradient= 2.5f; (*Vars)["weightgradient"]>>=weightGradient; weightChemical= 1.0f; (*Vars)["weightchemical"]>>=weightChemical; weightElastic= 100000.0f; (*Vars)["weightelastic"]>>=weightElastic; weightDislocation= 1.0f; (*Vars)["weightdislocation"]>>=weightDislocation; weightNoise = 0.0001f; (*Vars)["weightnoise"]>>=weightNoise; ======= weightExternal= 0.f; weightDislocation= 0.01f; (*Vars)["weightdislocation"]>>=weightDislocation; weightNoise = 1.0f; (*Vars)["weightnoise"]>>=weightNoise; DeltaTime =0.01f; (*Vars)["deltatime"]>>=DeltaTime; weightGradient= 2.5f; (*Vars)["weightgradient"]>>=weightGradient; weightChemical= 1.0f; (*Vars)["weightchemical"]>>=weightChemical; weightElastic= 100000.0f; (*Vars)["weightelastic"]>>=weightElastic; TransitionTemperature=450.0f; (*Vars)["transitiontemperature"]>>=TransitionTemperature; >>>>>>> origin/master ///////////////////////////////////////////////////////// LPC[2]=32.05f; LPC[3]=37.5f; ss=(*Vars)["coefficient"]; if (ss!="") ss>>LPC[1]>>LPC[2]>>LPC[3]; ///////////////////////////////////////////////////////// StrainTensor = &((*Datas)["varianttensor"]); if (StrainTensor->Arr == NULL){ GV<0>::LogAndError<<"Error: variants' strain tensor does not set while initialize dynamics\n"; return -1; } VariantN = StrainTensor->Dimension[1]; ///////////////////////////////////////////////////////// // it is called to initialize the --run-- function // allocate memory initial size and default values //Init(3,nx,ny,nz,Data_NONE); SetCalPos(Data_HOST_DEV); //Eta=eta; // a pointer assign, not value or memory operation Eta = &((*Datas)["eta"]); // may create here if ( Eta->Arr == NULL ){ Eta->Init(4,VariantN,nx,ny,nz,Data_HOST_DEV); SetCalPos(Data_DEV); (*Eta)=0.0f; }else{ Eta->HostToDevice();} ///////////////////////////////////////////////////////// int dim[5]={3,nx,ny,nz}; int dimN[6]={4,VariantN,nx,ny,nz}; <<<<<<< HEAD Noise.InitRandom(4,VariantN,nx,ny,nz); ======= Noise.InitRandom(4,VariantN,nx,ny,nz, 0, 0.001, 0,0); >>>>>>> origin/master Gradient.Init(dimN,Data_HOST_DEV); GradientForce.Init(dimN,Data_HOST_DEV); ChemicalForce.Init(dimN,Data_HOST_DEV); <<<<<<< HEAD //ChemicalFreeEnergy.Init(dim,Data_DEV); ======= >>>>>>> origin/master ///////////////////////////////////////////////////////////////// real C00=3.5f, C01=1.5f, C33=1.0f;//defaut values Data<Real> cijkl(4,3,3,3,3,Data_HOST_DEV); SetCalPos(Data_HOST); cijkl=0.0f; cijkl(0,0,0,0) =C00; cijkl(1,1,1,1) =C00; cijkl(2,2,2,2) =C00; cijkl(0,0,1,1) =C01; cijkl(1,1,2,2) =C01; cijkl(2,2,0,0) =C01; cijkl(0,1,0,1) =C33; cijkl(1,2,1,2) =C33; cijkl(2,0,2,0) =C33; cijkl(1,1,0,0) =C01; cijkl(2,2,1,1) =C01; cijkl(0,0,2,2) =C01; cijkl(1,0,1,0) =C33; cijkl(0,1,1,0) =C33; cijkl(1,0,0,1) =C33; cijkl(2,1,2,1) =C33; cijkl(2,1,1,2) =C33; cijkl(1,2,2,1) =C33; cijkl(0,2,0,2) =C33; cijkl(0,2,2,0) =C33; cijkl(2,0,0,2) =C33; // Data<Real> *modulus; 
modulus=&((*Datas)["modulus"]); if ( modulus->Arr != NULL ) cijkl = (*modulus); /////////////////////////// Data<Real> vstrain(3,2*VariantN,3,3,Data_HOST_DEV); for (int i=0; i<2*VariantN*3*3; i++) vstrain.Arr[i]=StrainTensor->Arr[i%(VariantN*3*3)]; /////////////////////////// GV<0>::LogAndError<<"Space structure tensor is calculating\n"; B.InitB(VariantN,VariantN,nx,ny,nz,dx.Re,dy.Re,dz.Re,vstrain,cijkl); GV<0>::LogAndError<<"Calculating of space structure tensor relating to the elastic terms is finished\n"; <<<<<<< HEAD ======= if (DEBUG){ B.DeviceToHost(); B.DumpFile("data.b"); } >>>>>>> origin/master ///////////////////////////////////////////////////////////////// ElasticForce.Init(dimN,Data_HOST_DEV); Eta_RT.Init(dimN,Data_HOST_DEV); Eta_CT.Init(dimN,Data_HOST_DEV); ReciprocalTerm.Init(dimN,Data_HOST_DEV); ///////////////////////////////////////////////////////////////// int rank=3,ns[3]={nx,ny,nz},dist=nx*ny*nz,stride=1; GV<0>::LogAndError<<"Cuda fft plan is to creat\n"; if (hipfftPlanMany(&planAll_Cuda,rank,ns,ns,stride,dist,ns,stride,dist,HIPFFT_C2C,VariantN)==HIPFFT_SUCCESS) GV<0>::LogAndError<<"Cuda fft plan is created\n"; else GV<0>::LogAndError<<"Cuda fft plan fails to create\n"; Defect = &((*Datas)["defect"]); if (Defect->Arr==NULL){ Defect->Init(dim,Data_HOST_DEV); // it will be init when read in SetCalPos(Data_HOST_DEV); (*Defect)=0.0f; }else { Defect->HostToDevice();} ///////////////////////////////////////////////////////////////// // the 6 component form should be rewriten to 3*3 form DislocationStressOForm= &((*Datas)["dislocationstress"]); int dim33[6]={5,3,3,nx,ny,nz}; DislocationStress.Init(dim33,Data_HOST_DEV);//it will be also init when read in if (DislocationStressOForm->Arr==NULL){ SetCalPos(Data_HOST_DEV); (DislocationStress)=0.0f; }else { SetCalPos(Data_HOST); for (int i=0; i<nx; i++) for (int j=0; j<ny; j++) for (int k=0; k<nz; k++){ DislocationStress(0,0,i,j,k)=(*DislocationStressOForm)(0,i,j,k); DislocationStress(0,1,i,j,k)=(*DislocationStressOForm)(1,i,j,k); DislocationStress(0,2,i,j,k)=(*DislocationStressOForm)(2,i,j,k); DislocationStress(1,1,i,j,k)=(*DislocationStressOForm)(3,i,j,k); DislocationStress(1,2,i,j,k)=(*DislocationStressOForm)(4,i,j,k); DislocationStress(2,2,i,j,k)=(*DislocationStressOForm)(5,i,j,k); DislocationStress(1,0,i,j,k)=(*DislocationStressOForm)(1,i,j,k); DislocationStress(2,0,i,j,k)=(*DislocationStressOForm)(2,i,j,k); DislocationStress(2,1,i,j,k)=(*DislocationStressOForm)(4,i,j,k); } DislocationStress.HostToDevice(); } DislocationForce.Init(dimN,Data_HOST_DEV); DislocationForceConst.Init(dimN,Data_HOST_DEV); DislocationForceInit(); //this only need one calculation <<<<<<< HEAD if (1&&DEBUG){ SetCalPos(Data_HOST); for (int i=0;i<nx;i++) for (int j=0;j<ny;j++) for (int k=0;k<nz;k++) (*Eta)(0,i,j,k)=.01f; for (int v=1;v<4;v++) for (int i=0;i<nx;i++) for (int j=0;j<ny;j++) for (int k=0;k<nz;k++) (*Eta)(v,i,j,k)=0.f; Eta->HostToDevice(); } ======= >>>>>>> origin/master return 0; } Dynamics_mart::Dynamics_mart(){ } Dynamics_mart::~Dynamics_mart(){ if (planAll_Cuda) hipfftDestroy(planAll_Cuda); } __global__ void Grad_Mart_Kernel(Real *Gradient_arr, Real* Eta_arr,int *dim, Real dx, Real dy, Real dz){ // (* 4 128 128) (* 4 128) int x=blockIdx.x, y= blockIdx.y, z=threadIdx.x, v=blockIdx.z; /**/PPart(Gradient_arr,dim,v,x,y,z)= (PPart(Eta_arr,dim,v,x+1,y,z)+PPart(Eta_arr,dim,v,x-1,y,z)-2*PPart(Eta_arr,dim,v,x,y,z))/(2.0f* dx)/3.0f +(PPart(Eta_arr,dim,v,x,y+1,z)+PPart(Eta_arr,dim,v,x,y-1,z)-2*PPart(Eta_arr,dim,v,x,y,z))/(2.0f* dy)/3.0f 
+(PPart(Eta_arr,dim,v,x,y,z+1)+PPart(Eta_arr,dim,v,x,y,z-1)-2*PPart(Eta_arr,dim,v,x,y,z))/(2.0f* dz)/3.0f ; // */ } int Dynamics_mart::GradientCalculate(){ dim3 bn(nx,ny,VariantN); dim3 tn(nz); hipLaunchKernelGGL(( Grad_Mart_Kernel), dim3(bn),dim3(tn), 0, 0, Gradient.Arr_dev, Eta->Arr_dev, Eta->Dimension_dev, dx,dy,dz); return 0; } int Dynamics_mart::GradientForceCalculate(){ GradientCalculate(); <<<<<<< HEAD GradientForce= weightGradient* Gradient; ======= GradientForce= Gradient; >>>>>>> origin/master return 0; } int Dynamics_mart::LPCConstruct(){ LPC[1]=0.02f *(Temperature-TransitionTemperature); return 0; } <<<<<<< HEAD __global__ void ChemicalFreeEnergy_mart_kernel(Real*cfn,Real*eta, int VariantN){ int x=blockIdx.x, y=blockIdx.y, z=threadIdx.x, nx=gridDim.x, ny=gridDim.y, nz=blockDim.x; cfn[(x*ny+y)*nz+z]=0.f; for (int i=0; i<VariantN; i++) cfn[(x*ny+y)*nz+z]+=(eta[((i*nx+x)*ny+y)*nz+z]^2); cfn[(x*ny+y)*nz+z]=(cfn[(x*ny+y)*nz+z]^3)/6.0f; for (int i=0; i<VariantN; i++) cfn[(x*ny+y)*nz+z]+=(eta[((i*nx+x)*ny+y)*nz+z]^2)/2.0f+(eta[((i*nx+x)*ny+y)*nz+z]^4)/4.0f; } __global__ void ChemiFor_Mart_Kernel(Real*ChemiForce_arr, Real*Eta_arr,Real a1,Real a2,Real a3,Real weight){// n1*n2*n3 each variant have an driving force ======= __global__ void ChemiFor_Mart_Kernel(Real*ChemiForce_arr, Real*Eta_arr,Real a1,Real a2,Real a3){// n1*n2*n3 each variant have an driving force >>>>>>> origin/master int x=blockIdx.x, y=blockIdx.y, z=threadIdx.x,v=blockIdx.z, nx=gridDim.x, ny=gridDim.y, nz=blockDim.x,nv=gridDim.z; int tid=((v*nx+x)*ny+y)*nz+z; // request the same memory at the same time will lead to nan at the wrost situation ChemiForce_arr[tid]=0.0; <<<<<<< HEAD sqrt(abs(a1/(a2-a3))); for (int i=0;i<nv;i++) ChemiForce_arr[tid]+=((sqrt(abs(a1/(a2-a3)))*Eta_arr[((i*nx+x)*ny+y)*nz+z])^2); if (Eta_arr[tid]>=0){ ChemiForce_arr[tid]= -weight*(sqrt(abs(a1/(a2-a3)))*Eta_arr[tid]) *( a1 -a2*((sqrt(abs(a1/(a2-a3)))*Eta_arr[tid])^2) +a3*ChemiForce_arr[tid]); }else{//<0 power 2 ChemiForce_arr[tid]= 1000.0*weight*a2*Eta_arr[tid]*Eta_arr[tid] ; ======= if (Eta_arr<=0){ for (int i=0;i<nv;i++) ChemiForce_arr[tid]+=(Eta_arr[((i*nx+x)*ny+y)*nz+z]^2); ChemiForce_arr[tid]= Eta_arr[tid]*( a1 -a2*(Eta_arr[tid]^2) +a3*ChemiForce_arr[tid]); }else{ for (int i=0;i<nv;i++) ChemiForce_arr[tid]+=(Eta_arr[((i*nx+x)*ny+y)*nz+z]); ChemiForce_arr[tid]= Eta_arr[tid]*( a1 -a2*Eta_arr[tid] +a3*ChemiForce_arr[tid]); >>>>>>> origin/master } } int Dynamics_mart::ChemicalForceCalculate(){ ///////////////////////// <<<<<<< HEAD dim3 bvn(nx,ny,VariantN); dim3 bn(nx,ny); dim3 tn(nz); LPCConstruct(); hipLaunchKernelGGL(( ChemiFor_Mart_Kernel), dim3(bvn),dim3(tn), 0, 0, ChemicalForce.Arr_dev, Eta->Arr_dev, LPC[1], LPC[2], LPC[3],weightChemical); //ChemicalFreeEnergy_mart_kernel<<<bn,tn>>>(ChemicalFreeEnergy.Arr_dev,Eta->Arr_dev,VariantN); ======= dim3 bn(nx,ny,VariantN); dim3 tn(nz); LPCConstruct(); hipLaunchKernelGGL(( ChemiFor_Mart_Kernel), dim3(bn),dim3(tn), 0, 0, ChemicalForce.Arr_dev, Eta->Arr_dev, LPC[1], LPC[2], LPC[3]); >>>>>>> origin/master return 0; } //(* 2373 0.9) __global__ void ElaFor_Mart_Kernel(Complex *ReTerm,Complex*Eta_sq,Real* B){ int nx=gridDim.x, ny=gridDim.y, nz=blockDim.x,nv=gridDim.z; int x=blockIdx.x, y=blockIdx.y, z=threadIdx.x, v=blockIdx.z; ReTerm[((v*nx+x)*ny+y)*nz+z] = 0; <<<<<<< HEAD for (int i=0;i<nv;i++) ======= for (int i=0;i<v;i++) >>>>>>> origin/master ReTerm[((v*nx+x)*ny+y)*nz+z] += B[(((v*nv+i)*nx+x)*ny+y)*nz+z]* Eta_sq[((i*nx+x)*ny+y)*nz+z]; } int
Dynamics_mart::ElasticForceCalculate(){ SetCalPos(Data_DEV); Eta_CT=(*Eta)*(*Eta); //Store it in the buffer area hipfftExecC2C(planAll_Cuda,(hipfftComplex*)Eta_CT.Arr_dev,(hipfftComplex*)Eta_CT.Arr_dev,HIPFFT_FORWARD); dim3 bn(nx,ny,VariantN); dim3 tn(nz); Eta_CT = Eta_CT/Eta_CT.N()*VariantN; hipLaunchKernelGGL(( ElaFor_Mart_Kernel), dim3(bn),dim3(tn), 0, 0, ReciprocalTerm.Arr_dev,Eta_CT.Arr_dev,B.Arr_dev); hipfftExecC2C(planAll_Cuda,(hipfftComplex*)ReciprocalTerm.Arr_dev,(hipfftComplex*)ReciprocalTerm.Arr_dev,HIPFFT_BACKWARD); <<<<<<< HEAD ElasticForce = - weightElastic* ReciprocalTerm* (*Eta); ======= ElasticForce = ReciprocalTerm* (*Eta); if (0){ Eta_CT.DeviceToHost(); ElasticForce.DeviceToHost(); Eta_CT.DumpFile("data.eta_squre"); ElasticForce.DumpFile("data.r_term"); } >>>>>>> origin/master return 0; } int Dynamics_mart::DislocationForceInit(){ SetCalPos(Data_HOST); for (int saq=0;saq<VariantN;saq++){ for (int i=0;i<nx;i++) for (int j=0;j<ny;j++) for (int k=0;k<nz;k++){ DislocationForceConst(saq,i,j,k)=0; for (int sa=0;sa<3;sa++) for (int sap=0;sap<3;sap++){ DislocationForceConst(saq,i,j,k)=DislocationForceConst(saq,i,j,k)+25.0f*DislocationStress(sa,sap,i,j,k)*(*StrainTensor)(saq,sa,sap); } } } DislocationForceConst.HostToDevice(); SetCalPos(Data_DEV); return 0; } int Dynamics_mart::DislocationForceCalculate(){ SetCalPos(Data_DEV); <<<<<<< HEAD DislocationForce = -weightDislocation*DislocationForceConst*(*Eta); ======= DislocationForce=DislocationForceConst*(*Eta); >>>>>>> origin/master return 0; } __global__ void Block_Mart_Kernel(Real *Eta_arr, Real *Defect_arr){ int pn= blockIdx.y*gridDim.z*blockDim.x +blockIdx.z*blockDim.x +threadIdx.x; int pvn= blockIdx.x *gridDim.y *gridDim.z * blockDim.x +pn; Eta_arr[pvn]=Eta_arr[pvn]*(1.0f-Defect_arr[pn]); } int Dynamics_mart::Block(){ dim3 bn(VariantN,nx,ny); dim3 tn(nz); hipLaunchKernelGGL(( Block_Mart_Kernel), dim3(bn),dim3(tn), 0, 0, Eta->Arr_dev,Defect->Arr_dev); return 0; } int Dynamics_mart::Calculate(){ <<<<<<< HEAD if (1&&DEBUG) { Eta->DeviceToHost(); Eta->DumpFile("data.eta.before"); } SetCalPos(Data_DEV); ======= >>>>>>> origin/master string ss; (*Vars)["temperature"]>>=Temperature; GradientForceCalculate(); ChemicalForceCalculate(); ElasticForceCalculate(); DislocationForceCalculate(); //////////////////////////// Eta_RT=0.f; <<<<<<< HEAD Eta_RT += GradientForce; Eta_RT += ChemicalForce; Eta_RT += ElasticForce; Eta_RT += DislocationForce; ////////////// Noise.NewNormal_device(); Eta_RT += weightNoise * Noise; ////////////// ======= if (weightGradient>0) Eta_RT += weightGradient*GradientForce; if (weightChemical>0) Eta_RT += (0-weightChemical)*ChemicalForce; if (weightElastic>0) Eta_RT += (0-weightElastic)*ElasticForce; if (weightDislocation>0) Eta_RT += (0-weightDislocation)*DislocationForce; if (weightExternal>0) Eta_RT += (0-weightExternal)*ExternalForce; if (weightNoise>0){ Noise.NewNormal_device(); Eta_RT += weightNoise*0.0001* Noise; } >>>>>>> origin/master (*Eta) += DeltaTime* Eta_RT; //defect block Block(); /////////// <<<<<<< HEAD if (1&&DEBUG) { GradientForce.DeviceToHost(); ChemicalForce.DeviceToHost(); ElasticForce.DeviceToHost(); DislocationForce.DeviceToHost(); ======= if (DEBUG) { GradientForce.DeviceToHost(); ChemicalForce.DeviceToHost(); ElasticForce.DeviceToHost(); >>>>>>> origin/master Eta->DeviceToHost(); /// GradientForce.DumpFile("data.gradient"); ChemicalForce.DumpFile("data.chemical"); ElasticForce.DumpFile("data.elastic"); <<<<<<< HEAD DislocationForce.DumpFile("data.dislocation"); Eta->DumpFile("data.eta"); 
Eta_RT = weightNoise * Noise; Eta_RT.DeviceToHost(); Eta_RT.DumpFile("data.noise"); //string ss; cin>>ss; ======= Eta->DumpFile("data.eta"); >>>>>>> origin/master } /////////////////////////////// return 0; } int Dynamics_mart::RunFunc(string funcName){ return 0; } int Dynamics_mart::Fix(real progress){ string ss,mode; ss = (*Vars)["fix"]; do{ ss>>mode; if (mode=="temperature" ){ <<<<<<< HEAD real st,et; ss>>st>>et; ======= real st,et; //start and end temperature ss>>st>>et; >>>>>>> origin/master ((*Vars)["temperature"])<<=(st+ progress*(et- st)); } else if (mode=="pressure" ){ } else{ GV<0>::LogAndError<<"Error: fix style "<<mode<<" does not find!\n"; } } while ( ss != ""); return 0; } string Dynamics_mart::Get(string ss){ // return the statistic info. string var; ss>>var; if (var == "temperature") return ToString(Temperature); <<<<<<< HEAD if (var == "eta") return ToString(Eta->TotalDevice()/Eta->N()); if (var == "gradient" ) return ToString(GradientForce.TotalDevice()/GradientForce.N()); if (var == "chemical" ) return ToString(ChemicalForce.TotalDevice()/ChemicalForce.N()); if (var == "elastic" ) return ToString(ElasticForce.TotalDevice()/ElasticForce.N()); if (var == "dislocation") return ToString(DislocationForce.TotalDevice()/DislocationForce.N()); //if (var == "chemical.free.energy") return ToString(ChemicalFreeEnergy.TotalDevice()/ChemicalFreeEnergy.N()); ======= >>>>>>> origin/master else return "nan"; }
abee72b87b34f4478d2c4df78b8ed5ffc44cb724.cu
#define DEBUG 0 #include"pub.h" #include<curand.h> #include<cufft.h> #include"random.h" #include"gtensorb.h" #include"dynamics.h" #include"dynamics_mart.h" using namespace GS_NS; using namespace DATA_NS; int Dynamics_mart::Initialize(){ ///////////////////////////////////////////////////////// //para setting should be finished before or within this function string ss; ss=(*Vars)["gridsize"]; if (ss!="") ss>>nx>>ny>>nz>>dx>>dy>>dz; <<<<<<< HEAD DeltaTime =0.01f; (*Vars)["deltatime"]>>=DeltaTime; TransitionTemperature=450.0f; (*Vars)["transitiontemperature"]>>=TransitionTemperature; weightGradient= 2.5f; (*Vars)["weightgradient"]>>=weightGradient; weightChemical= 1.0f; (*Vars)["weightchemical"]>>=weightChemical; weightElastic= 100000.0f; (*Vars)["weightelastic"]>>=weightElastic; weightDislocation= 1.0f; (*Vars)["weightdislocation"]>>=weightDislocation; weightNoise = 0.0001f; (*Vars)["weightnoise"]>>=weightNoise; ======= weightExternal= 0.f; weightDislocation= 0.01f; (*Vars)["weightdislocation"]>>=weightDislocation; weightNoise = 1.0f; (*Vars)["weightnoise"]>>=weightNoise; DeltaTime =0.01f; (*Vars)["deltatime"]>>=DeltaTime; weightGradient= 2.5f; (*Vars)["weightgradient"]>>=weightGradient; weightChemical= 1.0f; (*Vars)["weightchemical"]>>=weightChemical; weightElastic= 100000.0f; (*Vars)["weightelastic"]>>=weightElastic; TransitionTemperature=450.0f; (*Vars)["transitiontemperature"]>>=TransitionTemperature; >>>>>>> origin/master ///////////////////////////////////////////////////////// LPC[2]=32.05f; LPC[3]=37.5f; ss=(*Vars)["coefficient"]; if (ss!="") ss>>LPC[1]>>LPC[2]>>LPC[3]; ///////////////////////////////////////////////////////// StrainTensor = &((*Datas)["varianttensor"]); if (StrainTensor->Arr == NULL){ GV<0>::LogAndError<<"Error: variants' strain tensor does not set while initialize dynamics\n"; return -1; } VariantN = StrainTensor->Dimension[1]; ///////////////////////////////////////////////////////// // it is called to initialize the --run-- function // allocate memory initial size and default values //Init(3,nx,ny,nz,Data_NONE); SetCalPos(Data_HOST_DEV); //Eta=eta; // a pointer assign, not value or memory operation Eta = &((*Datas)["eta"]); // may create here if ( Eta->Arr == NULL ){ Eta->Init(4,VariantN,nx,ny,nz,Data_HOST_DEV); SetCalPos(Data_DEV); (*Eta)=0.0f; }else{ Eta->HostToDevice();} ///////////////////////////////////////////////////////// int dim[5]={3,nx,ny,nz}; int dimN[6]={4,VariantN,nx,ny,nz}; <<<<<<< HEAD Noise.InitRandom(4,VariantN,nx,ny,nz); ======= Noise.InitRandom(4,VariantN,nx,ny,nz, 0, 0.001, 0,0); >>>>>>> origin/master Gradient.Init(dimN,Data_HOST_DEV); GradientForce.Init(dimN,Data_HOST_DEV); ChemicalForce.Init(dimN,Data_HOST_DEV); <<<<<<< HEAD //ChemicalFreeEnergy.Init(dim,Data_DEV); ======= >>>>>>> origin/master ///////////////////////////////////////////////////////////////// real C00=3.5f, C01=1.5f, C33=1.0f;//defaut values Data<Real> cijkl(4,3,3,3,3,Data_HOST_DEV); SetCalPos(Data_HOST); cijkl=0.0f; cijkl(0,0,0,0) =C00; cijkl(1,1,1,1) =C00; cijkl(2,2,2,2) =C00; cijkl(0,0,1,1) =C01; cijkl(1,1,2,2) =C01; cijkl(2,2,0,0) =C01; cijkl(0,1,0,1) =C33; cijkl(1,2,1,2) =C33; cijkl(2,0,2,0) =C33; cijkl(1,1,0,0) =C01; cijkl(2,2,1,1) =C01; cijkl(0,0,2,2) =C01; cijkl(1,0,1,0) =C33; cijkl(0,1,1,0) =C33; cijkl(1,0,0,1) =C33; cijkl(2,1,2,1) =C33; cijkl(2,1,1,2) =C33; cijkl(1,2,2,1) =C33; cijkl(0,2,0,2) =C33; cijkl(0,2,2,0) =C33; cijkl(2,0,0,2) =C33; // Data<Real> *modulus; modulus=&((*Datas)["modulus"]); if ( modulus->Arr != NULL ) cijkl = (*modulus); 
/////////////////////////// Data<Real> vstrain(3,2*VariantN,3,3,Data_HOST_DEV); for (int i=0; i<2*VariantN*3*3; i++) vstrain.Arr[i]=StrainTensor->Arr[i%(VariantN*3*3)]; /////////////////////////// GV<0>::LogAndError<<"Space structure tensor is calculating\n"; B.InitB(VariantN,VariantN,nx,ny,nz,dx.Re,dy.Re,dz.Re,vstrain,cijkl); GV<0>::LogAndError<<"Calculating of space structure tensor relating to the elastic terms is finished\n"; <<<<<<< HEAD ======= if (DEBUG){ B.DeviceToHost(); B.DumpFile("data.b"); } >>>>>>> origin/master ///////////////////////////////////////////////////////////////// ElasticForce.Init(dimN,Data_HOST_DEV); Eta_RT.Init(dimN,Data_HOST_DEV); Eta_CT.Init(dimN,Data_HOST_DEV); ReciprocalTerm.Init(dimN,Data_HOST_DEV); ///////////////////////////////////////////////////////////////// int rank=3,ns[3]={nx,ny,nz},dist=nx*ny*nz,stride=1; GV<0>::LogAndError<<"Cuda fft plan is to creat\n"; if (cufftPlanMany(&planAll_Cuda,rank,ns,ns,stride,dist,ns,stride,dist,CUFFT_C2C,VariantN)==CUFFT_SUCCESS) GV<0>::LogAndError<<"Cuda fft plan is created\n"; else GV<0>::LogAndError<<"Cuda fft plan fails to create\n"; Defect = &((*Datas)["defect"]); if (Defect->Arr==NULL){ Defect->Init(dim,Data_HOST_DEV); // it will be init when read in SetCalPos(Data_HOST_DEV); (*Defect)=0.0f; }else { Defect->HostToDevice();} ///////////////////////////////////////////////////////////////// // the 6 component form should be rewriten to 3*3 form DislocationStressOForm= &((*Datas)["dislocationstress"]); int dim33[6]={5,3,3,nx,ny,nz}; DislocationStress.Init(dim33,Data_HOST_DEV);//it will be also init when read in if (DislocationStressOForm->Arr==NULL){ SetCalPos(Data_HOST_DEV); (DislocationStress)=0.0f; }else { SetCalPos(Data_HOST); for (int i=0; i<nx; i++) for (int j=0; j<ny; j++) for (int k=0; k<nz; k++){ DislocationStress(0,0,i,j,k)=(*DislocationStressOForm)(0,i,j,k); DislocationStress(0,1,i,j,k)=(*DislocationStressOForm)(1,i,j,k); DislocationStress(0,2,i,j,k)=(*DislocationStressOForm)(2,i,j,k); DislocationStress(1,1,i,j,k)=(*DislocationStressOForm)(3,i,j,k); DislocationStress(1,2,i,j,k)=(*DislocationStressOForm)(4,i,j,k); DislocationStress(2,2,i,j,k)=(*DislocationStressOForm)(5,i,j,k); DislocationStress(1,0,i,j,k)=(*DislocationStressOForm)(1,i,j,k); DislocationStress(2,0,i,j,k)=(*DislocationStressOForm)(2,i,j,k); DislocationStress(2,1,i,j,k)=(*DislocationStressOForm)(4,i,j,k); } DislocationStress.HostToDevice(); } DislocationForce.Init(dimN,Data_HOST_DEV); DislocationForceConst.Init(dimN,Data_HOST_DEV); DislocationForceInit(); //this only need one calculation <<<<<<< HEAD if (1&&DEBUG){ SetCalPos(Data_HOST); for (int i=0;i<nx;i++) for (int j=0;j<ny;j++) for (int k=0;k<nz;k++) (*Eta)(0,i,j,k)=.01f; for (int v=1;v<4;v++) for (int i=0;i<nx;i++) for (int j=0;j<ny;j++) for (int k=0;k<nz;k++) (*Eta)(v,i,j,k)=0.f; Eta->HostToDevice(); } ======= >>>>>>> origin/master return 0; } Dynamics_mart::Dynamics_mart(){ } Dynamics_mart::~Dynamics_mart(){ if (planAll_Cuda) cufftDestroy(planAll_Cuda); } __global__ void Grad_Mart_Kernel(Real *Gradient_arr, Real* Eta_arr,int *dim, Real dx, Real dy, Real dz){ // (* 4 128 128) (* 4 128) int x=blockIdx.x, y= blockIdx.y, z=threadIdx.x, v=blockIdx.z; /**/PPart(Gradient_arr,dim,v,x,y,z)= (PPart(Eta_arr,dim,v,x+1,y,z)+PPart(Eta_arr,dim,v,x-1,y,z)-2*PPart(Eta_arr,dim,v,x,y,z))/(2.0f* dx)/3.0f +(PPart(Eta_arr,dim,v,x,y+1,z)+PPart(Eta_arr,dim,v,x,y-1,z)-2*PPart(Eta_arr,dim,v,x,y,z))/(2.0f* dy)/3.0f +(PPart(Eta_arr,dim,v,x,y,z+1)+PPart(Eta_arr,dim,v,x,y,z-1)-2*PPart(Eta_arr,dim,v,x,y,z))/(2.0f* 
dz)/3.0f ; // */ } int Dynamics_mart::GradientCalculate(){ dim3 bn(nx,ny,VariantN); dim3 tn(nz); Grad_Mart_Kernel<<<bn,tn>>>(Gradient.Arr_dev, Eta->Arr_dev, Eta->Dimension_dev, dx,dy,dz); return 0; } int Dynamics_mart::GradientForceCalculate(){ GradientCalculate(); <<<<<<< HEAD GradientForce= weightGradient* Gradient; ======= GradientForce= Gradient; >>>>>>> origin/master return 0; } int Dynamics_mart::LPCConstruct(){ LPC[1]=0.02f *(Temperature-TransitionTemperature); return 0; } <<<<<<< HEAD __global__ void ChemicalFreeEnergy_mart_kernel(Real*cfn,Real*eta, int VariantN){ int x=blockIdx.x, y=blockIdx.y, z=threadIdx.x, nx=gridDim.x, ny=gridDim.y, nz=blockDim.x; cfn[(x*ny+y)*nz+z]=0.f; for (int i=0; i<VariantN; i++) cfn[(x*ny+y)*nz+z]+=(eta[((i*nx+x)*ny+y)*nz+z]^2); cfn[(x*ny+y)*nz+z]=(cfn[(x*ny+y)*nz+z]^3)/6.0f; for (int i=0; i<VariantN; i++) cfn[(x*ny+y)*nz+z]+=(eta[((i*nx+x)*ny+y)*nz+z]^2)/2.0f+(eta[((i*nx+x)*ny+y)*nz+z]^4)/4.0f; } __global__ void ChemiFor_Mart_Kernel(Real*ChemiForce_arr, Real*Eta_arr,Real a1,Real a2,Real a3,Real weight){// n1*n2*n3 each variant have an driving force ======= __global__ void ChemiFor_Mart_Kernel(Real*ChemiForce_arr, Real*Eta_arr,Real a1,Real a2,Real a3){// n1*n2*n3 each variant have an driving force >>>>>>> origin/master int x=blockIdx.x, y=blockIdx.y, z=threadIdx.x,v=blockIdx.z, nx=gridDim.x, ny=gridDim.y, nz=blockDim.x,nv=gridDim.z; int tid=((v*nx+x)*ny+y)*nz+z; // request the same memory at the same time will lead to nan at the wrost situation ChemiForce_arr[tid]=0.0; <<<<<<< HEAD sqrt(abs(a1/(a2-a3))); for (int i=0;i<nv;i++) ChemiForce_arr[tid]+=((sqrt(abs(a1/(a2-a3)))*Eta_arr[((i*nx+x)*ny+y)*nz+z])^2); if (Eta_arr[tid]>=0){ ChemiForce_arr[tid]= -weight*(sqrt(abs(a1/(a2-a3)))*Eta_arr[tid]) *( a1 -a2*((sqrt(abs(a1/(a2-a3)))*Eta_arr[tid])^2) +a3*ChemiForce_arr[tid]); }else{//<0 power 2 ChemiForce_arr[tid]= 1000.0*weight*a2*Eta_arr[tid]*Eta_arr[tid] ; ======= if (Eta_arr<=0){ for (int i=0;i<nv;i++) ChemiForce_arr[tid]+=(Eta_arr[((i*nx+x)*ny+y)*nz+z]^2); ChemiForce_arr[tid]= Eta_arr[tid]*( a1 -a2*(Eta_arr[tid]^2) +a3*ChemiForce_arr[tid]); }else{ for (int i=0;i<nv;i++) ChemiForce_arr[tid]+=(Eta_arr[((i*nx+x)*ny+y)*nz+z]); ChemiForce_arr[tid]= Eta_arr[tid]*( a1 -a2*Eta_arr[tid] +a3*ChemiForce_arr[tid]); >>>>>>> origin/master } } int Dynamics_mart::ChemicalForceCalculate(){ ///////////////////////// <<<<<<< HEAD dim3 bvn(nx,ny,VariantN); dim3 bn(nx,ny); dim3 tn(nz); LPCConstruct(); ChemiFor_Mart_Kernel<<<bvn,tn>>>(ChemicalForce.Arr_dev, Eta->Arr_dev, LPC[1], LPC[2], LPC[3],weightChemical); //ChemicalFreeEnergy_mart_kernel<<<bn,tn>>>(ChemicalFreeEnergy.Arr_dev,Eta->Arr_dev,VariantN); ======= dim3 bn(nx,ny,VariantN); dim3 tn(nz); LPCConstruct(); ChemiFor_Mart_Kernel<<<bn,tn>>>(ChemicalForce.Arr_dev, Eta->Arr_dev, LPC[1], LPC[2], LPC[3]); >>>>>>> origin/master return 0; } //(* 2373 0.9) __global__ void ElaFor_Mart_Kernel(Complex *ReTerm,Complex*Eta_sq,Real* B){ int nx=gridDim.x, ny=gridDim.y, nz=blockDim.x,nv=gridDim.z; int x=blockIdx.x, y=blockIdx.y, z=threadIdx.x, v=blockIdx.z; ReTerm[((v*nx+x)*ny+y)*nz+z] = 0; <<<<<<< HEAD for (int i=0;i<nv;i++) ======= for (int i=0;i<v;i++) >>>>>>> origin/master ReTerm[((v*nx+x)*ny+y)*nz+z] += B[(((v*nv+i)*nx+x)*ny+y)*nz+z]* Eta_sq[((i*nx+x)*ny+y)*nz+z]; } int Dynamics_mart::ElasticForceCalculate(){ SetCalPos(Data_DEV); Eta_CT=(*Eta)*(*Eta); //Store it in the buffer area cufftExecC2C(planAll_Cuda,(cufftComplex*)Eta_CT.Arr_dev,(cufftComplex*)Eta_CT.Arr_dev,CUFFT_FORWARD); dim3 bn(nx,ny,VariantN); dim3 tn(nz); Eta_CT = 
Eta_CT/Eta_CT.N()*VariantN; ElaFor_Mart_Kernel<<<bn,tn>>>(ReciprocalTerm.Arr_dev,Eta_CT.Arr_dev,B.Arr_dev); cufftExecC2C(planAll_Cuda,(cufftComplex*)ReciprocalTerm.Arr_dev,(cufftComplex*)ReciprocalTerm.Arr_dev,CUFFT_INVERSE); <<<<<<< HEAD ElasticForce = - weightElastic* ReciprocalTerm* (*Eta); ======= ElasticForce = ReciprocalTerm* (*Eta); if (0){ Eta_CT.DeviceToHost(); ElasticForce.DeviceToHost(); Eta_CT.DumpFile("data.eta_squre"); ElasticForce.DumpFile("data.r_term"); } >>>>>>> origin/master return 0; } int Dynamics_mart::DislocationForceInit(){ SetCalPos(Data_HOST); for (int saq=0;saq<VariantN;saq++){ for (int i=0;i<nx;i++) for (int j=0;j<ny;j++) for (int k=0;k<nz;k++){ DislocationForceConst(saq,i,j,k)=0; for (int sa=0;sa<3;sa++) for (int sap=0;sap<3;sap++){ DislocationForceConst(saq,i,j,k)=DislocationForceConst(saq,i,j,k)+25.0f*DislocationStress(sa,sap,i,j,k)*(*StrainTensor)(saq,sa,sap); } } } DislocationForceConst.HostToDevice(); SetCalPos(Data_DEV); return 0; } int Dynamics_mart::DislocationForceCalculate(){ SetCalPos(Data_DEV); <<<<<<< HEAD DislocationForce = -weightDislocation*DislocationForceConst*(*Eta); ======= DislocationForce=DislocationForceConst*(*Eta); >>>>>>> origin/master return 0; } __global__ void Block_Mart_Kernel(Real *Eta_arr, Real *Defect_arr){ int pn= blockIdx.y*gridDim.z*blockDim.x +blockIdx.z*blockDim.x +threadIdx.x; int pvn= blockIdx.x *gridDim.y *gridDim.z * blockDim.x +pn; Eta_arr[pvn]=Eta_arr[pvn]*(1.0f-Defect_arr[pn]); } int Dynamics_mart::Block(){ dim3 bn(VariantN,nx,ny); dim3 tn(nz); Block_Mart_Kernel<<<bn,tn>>>(Eta->Arr_dev,Defect->Arr_dev); return 0; } int Dynamics_mart::Calculate(){ <<<<<<< HEAD if (1&&DEBUG) { Eta->DeviceToHost(); Eta->DumpFile("data.eta.before"); } SetCalPos(Data_DEV); ======= >>>>>>> origin/master string ss; (*Vars)["temperature"]>>=Temperature; GradientForceCalculate(); ChemicalForceCalculate(); ElasticForceCalculate(); DislocationForceCalculate(); //////////////////////////// Eta_RT=0.f; <<<<<<< HEAD Eta_RT += GradientForce; Eta_RT += ChemicalForce; Eta_RT += ElasticForce; Eta_RT += DislocationForce; ////////////// Noise.NewNormal_device(); Eta_RT += weightNoise * Noise; ////////////// ======= if (weightGradient>0) Eta_RT += weightGradient*GradientForce; if (weightChemical>0) Eta_RT += (0-weightChemical)*ChemicalForce; if (weightElastic>0) Eta_RT += (0-weightElastic)*ElasticForce; if (weightDislocation>0) Eta_RT += (0-weightDislocation)*DislocationForce; if (weightExternal>0) Eta_RT += (0-weightExternal)*ExternalForce; if (weightNoise>0){ Noise.NewNormal_device(); Eta_RT += weightNoise*0.0001* Noise; } >>>>>>> origin/master (*Eta) += DeltaTime* Eta_RT; //defect block Block(); /////////// <<<<<<< HEAD if (1&&DEBUG) { GradientForce.DeviceToHost(); ChemicalForce.DeviceToHost(); ElasticForce.DeviceToHost(); DislocationForce.DeviceToHost(); ======= if (DEBUG) { GradientForce.DeviceToHost(); ChemicalForce.DeviceToHost(); ElasticForce.DeviceToHost(); >>>>>>> origin/master Eta->DeviceToHost(); /// GradientForce.DumpFile("data.gradient"); ChemicalForce.DumpFile("data.chemical"); ElasticForce.DumpFile("data.elastic"); <<<<<<< HEAD DislocationForce.DumpFile("data.dislocation"); Eta->DumpFile("data.eta"); Eta_RT = weightNoise * Noise; Eta_RT.DeviceToHost(); Eta_RT.DumpFile("data.noise"); //string ss; cin>>ss; ======= Eta->DumpFile("data.eta"); >>>>>>> origin/master } /////////////////////////////// return 0; } int Dynamics_mart::RunFunc(string funcName){ return 0; } int Dynamics_mart::Fix(real progress){ string ss,mode; ss = (*Vars)["fix"]; 
do{ ss>>mode; if (mode=="temperature" ){ <<<<<<< HEAD real st,et; ss>>st>>et; ======= real st,et; //start and end temperature ss>>st>>et; >>>>>>> origin/master ((*Vars)["temperature"])<<=(st+ progress*(et- st)); } else if (mode=="pressure" ){ } else{ GV<0>::LogAndError<<"Error: fix style "<<mode<<" does not find!\n"; } } while ( ss != ""); return 0; } string Dynamics_mart::Get(string ss){ // return the statistic info. string var; ss>>var; if (var == "temperature") return ToString(Temperature); <<<<<<< HEAD if (var == "eta") return ToString(Eta->TotalDevice()/Eta->N()); if (var == "gradient" ) return ToString(GradientForce.TotalDevice()/GradientForce.N()); if (var == "chemical" ) return ToString(ChemicalForce.TotalDevice()/ChemicalForce.N()); if (var == "elastic" ) return ToString(ElasticForce.TotalDevice()/ElasticForce.N()); if (var == "dislocation") return ToString(DislocationForce.TotalDevice()/DislocationForce.N()); //if (var == "chemical.free.energy") return ToString(ChemicalFreeEnergy.TotalDevice()/ChemicalFreeEnergy.N()); ======= >>>>>>> origin/master else return "nan"; }
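// A minimal, hedged sketch of the CUDA -> HIP kernel-launch mapping that the hipified files in
// this corpus follow: kernel<<<grid, block, shmem, stream>>>(args...) becomes
// hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args...), with the shared-memory byte
// count and stream spelled out explicitly (0 when the CUDA code omitted them). The kernel and
// arguments below are hypothetical, chosen only to keep the example self-contained.
#include "hip/hip_runtime.h"

__global__ void scale_kernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per element
  if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, float factor, int n) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA form: scale_kernel<<<grid, block>>>(d_data, factor, n);
  // Equivalent HIP form, as emitted by hipify:
  hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_data, factor, n);
}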
aa91bba0ede563a2ead0e78ec33c66973e02765e.hip
// !!! This is a file automatically generated by hipify!!! #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/impl/KmBurstL2Norm.cuh> #include <faiss/gpu/impl/TestKmBurstL2Norm.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/PtxUtils.cuh> #include <faiss/gpu/utils/Reductions.cuh> #include <algorithm> #include <thrust/fill.h> namespace faiss { namespace gpu { namespace test_kmb_l2norm{ // // Test cases // template<typename T> void test_case_0(Tensor<float, 3, true, int>& outDists, Tensor<T, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<T, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<T, 4, true, int>& ave, Tensor<T, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, hipStream_t stream){ thrust::fill(thrust::hip::par.on(stream), vals.data(), vals.end(),1); } template<typename T> void test_case_1(Tensor<float, 3, true, int>& outDists, Tensor<T, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<T, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<T, 4, true, int>& ave, Tensor<T, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, hipStream_t stream){ runKmBurstL2Norm(centroids,ave,blocks, vals,patchsize, 1.,true,stream); } } // namespace test_kmb_l2norm // // Main Test Function // template<typename T> void test_kmburst_l2norm(int test_case, Tensor<float, 3, true, int>& outDists, Tensor<T, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<T, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<T, 4, true, int>& ave, Tensor<T, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, hipStream_t stream){ fprintf(stdout,"Testing: [km burst l2norm]\n"); if (test_case == 0){ test_kmb_l2norm::test_case_0<T>(outDists,burst,blocks,centroids,clusters, ave,modes,vals,patchsize,offset,stream); }else if (test_case == 1){ test_kmb_l2norm::test_case_1<T>(outDists,burst,blocks,centroids,clusters, ave,modes,vals,patchsize,offset,stream); }else{ FAISS_THROW_FMT("[TestKmBurstL2Norm.cu]: unimplemented test case %d",test_case); } } // // Template Init // void test_kmburst_l2norm(int test_case, Tensor<float, 3, true, int>& outDists, Tensor<float, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<float, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<float, 4, true, int>& ave, Tensor<float, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, hipStream_t stream){ test_kmburst_l2norm<float>(test_case, outDists, burst, blocks, centroids, clusters, ave, modes, vals, patchsize, offset, stream); } void test_kmburst_l2norm(int test_case, Tensor<float, 3, true, int>& outDists, Tensor<half, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<half, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<half, 4, true, int>& ave, Tensor<half, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, hipStream_t stream){ test_kmburst_l2norm<half>(test_case, outDists, burst, blocks,centroids, clusters, ave, modes, vals, patchsize, offset, stream); } } }
aa91bba0ede563a2ead0e78ec33c66973e02765e.cu
#include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/impl/KmBurstL2Norm.cuh> #include <faiss/gpu/impl/TestKmBurstL2Norm.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/PtxUtils.cuh> #include <faiss/gpu/utils/Reductions.cuh> #include <algorithm> #include <thrust/fill.h> namespace faiss { namespace gpu { namespace test_kmb_l2norm{ // // Test cases // template<typename T> void test_case_0(Tensor<float, 3, true, int>& outDists, Tensor<T, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<T, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<T, 4, true, int>& ave, Tensor<T, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, cudaStream_t stream){ thrust::fill(thrust::cuda::par.on(stream), vals.data(), vals.end(),1); } template<typename T> void test_case_1(Tensor<float, 3, true, int>& outDists, Tensor<T, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<T, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<T, 4, true, int>& ave, Tensor<T, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, cudaStream_t stream){ runKmBurstL2Norm(centroids,ave,blocks, vals,patchsize, 1.,true,stream); } } // namespace test_kmb_l2norm // // Main Test Function // template<typename T> void test_kmburst_l2norm(int test_case, Tensor<float, 3, true, int>& outDists, Tensor<T, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<T, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<T, 4, true, int>& ave, Tensor<T, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, cudaStream_t stream){ fprintf(stdout,"Testing: [km burst l2norm]\n"); if (test_case == 0){ test_kmb_l2norm::test_case_0<T>(outDists,burst,blocks,centroids,clusters, ave,modes,vals,patchsize,offset,stream); }else if (test_case == 1){ test_kmb_l2norm::test_case_1<T>(outDists,burst,blocks,centroids,clusters, ave,modes,vals,patchsize,offset,stream); }else{ FAISS_THROW_FMT("[TestKmBurstL2Norm.cu]: unimplemented test case %d",test_case); } } // // Template Init // void test_kmburst_l2norm(int test_case, Tensor<float, 3, true, int>& outDists, Tensor<float, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<float, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<float, 4, true, int>& ave, Tensor<float, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, cudaStream_t stream){ test_kmburst_l2norm<float>(test_case, outDists, burst, blocks, centroids, clusters, ave, modes, vals, patchsize, offset, stream); } void test_kmburst_l2norm(int test_case, Tensor<float, 3, true, int>& outDists, Tensor<half, 4, true, int>& burst, Tensor<int, 5, true, int>& blocks, Tensor<half, 5, true, int>& centroids, Tensor<uint8_t, 4, true, int>& clusters, Tensor<half, 4, true, int>& ave, Tensor<half, 4, true, int>& modes, Tensor<float, 3, true, int>& vals, int patchsize, float offset, cudaStream_t stream){ test_kmburst_l2norm<half>(test_case, outDists, burst, blocks,centroids, clusters, ave, modes, vals, patchsize, offset, stream); } } }
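// A small, hedged sketch isolating the stream-scoped thrust::fill used by test_case_0 in the
// record above, without the Faiss Tensor wrapper; the raw pointer, length, and fill value are
// illustrative assumptions.
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <cuda_runtime.h>

void fill_vals_async(float* d_vals, int n, cudaStream_t stream) {
  // thrust::cuda::par.on(stream) runs the algorithm on the caller's stream rather than the
  // default stream, matching the call in the test case.
  thrust::fill(thrust::cuda::par.on(stream), d_vals, d_vals + n, 1.0f);
}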
61deecdde3022bf4e7df578cd97291a51c5798f4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //--blockDim=[4,2] --gridDim=[2,2] -DWIDTH=8 -DHEIGHT=8 -DTILE_DIM=4 -DBLOCK_ROWS=2 // Example taken from transpose benchmark in the CUDA SDK (v5.0) // Note fly in ointment with threadIdx.y invariant (should investigate) #define WIDTH 8 #define HEIGHT 8 #define TILE_DIM 4 #define BLOCK_ROWS 2 __global__ void transpose(float *odata, float *idata, int width, int height) { __requires(width == WIDTH); __requires(height == HEIGHT); // additional preconditions that we check __assert(blockDim.x == TILE_DIM); __assert(blockDim.y == BLOCK_ROWS); __assert(width == gridDim.x * TILE_DIM); __assert(height == gridDim.y * TILE_DIM); int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + width * yIndex; int index_out = yIndex + height * xIndex; for (int i=0; __invariant(__mod_pow2(i, BLOCK_ROWS) == 0), __invariant(0 <= i), __invariant(i <= TILE_DIM), __invariant(__write_implies(odata, __write_offset_bytes(odata)/sizeof(float) / HEIGHT % TILE_DIM == threadIdx.x)), __invariant(__write_implies(odata, __write_offset_bytes(odata)/sizeof(float) % HEIGHT % TILE_DIM % BLOCK_ROWS == threadIdx.y)), __invariant(__write_implies(odata, __write_offset_bytes(odata)/sizeof(float) / HEIGHT / TILE_DIM == blockIdx.x)), __invariant(__write_implies(odata, __write_offset_bytes(odata)/sizeof(float) % HEIGHT / TILE_DIM == blockIdx.y)), i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i] = idata[index_in+i*width]; } }
61deecdde3022bf4e7df578cd97291a51c5798f4.cu
//--blockDim=[4,2] --gridDim=[2,2] -DWIDTH=8 -DHEIGHT=8 -DTILE_DIM=4 -DBLOCK_ROWS=2 // Example taken from transpose benchmark in the CUDA SDK (v5.0) // Note fly in ointment with threadIdx.y invariant (should investigate) #define WIDTH 8 #define HEIGHT 8 #define TILE_DIM 4 #define BLOCK_ROWS 2 __global__ void transpose(float *odata, float *idata, int width, int height) { __requires(width == WIDTH); __requires(height == HEIGHT); // additional preconditions that we check __assert(blockDim.x == TILE_DIM); __assert(blockDim.y == BLOCK_ROWS); __assert(width == gridDim.x * TILE_DIM); __assert(height == gridDim.y * TILE_DIM); int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + width * yIndex; int index_out = yIndex + height * xIndex; for (int i=0; __invariant(__mod_pow2(i, BLOCK_ROWS) == 0), __invariant(0 <= i), __invariant(i <= TILE_DIM), __invariant(__write_implies(odata, __write_offset_bytes(odata)/sizeof(float) / HEIGHT % TILE_DIM == threadIdx.x)), __invariant(__write_implies(odata, __write_offset_bytes(odata)/sizeof(float) % HEIGHT % TILE_DIM % BLOCK_ROWS == threadIdx.y)), __invariant(__write_implies(odata, __write_offset_bytes(odata)/sizeof(float) / HEIGHT / TILE_DIM == blockIdx.x)), __invariant(__write_implies(odata, __write_offset_bytes(odata)/sizeof(float) % HEIGHT / TILE_DIM == blockIdx.y)), i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i] = idata[index_in+i*width]; } }
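// A hedged, standalone sketch of the access pattern in the annotated transpose kernel above.
// The __requires/__assert/__invariant annotations are verifier intrinsics (GPUVerify-style
// harness flags in the first comment line), so this copy drops them and adds a plain host-side
// launch matching --blockDim=[4,2] --gridDim=[2,2] with WIDTH == HEIGHT == 8; buffer allocation
// is left to the caller and the names here are illustrative.
#include <cuda_runtime.h>

#define T_TILE_DIM 4
#define T_BLOCK_ROWS 2

__global__ void transpose_plain(float* odata, const float* idata, int width, int height) {
  int xIndex = blockIdx.x * T_TILE_DIM + threadIdx.x;
  int yIndex = blockIdx.y * T_TILE_DIM + threadIdx.y;
  int index_in = xIndex + width * yIndex;    // row-major source element
  int index_out = yIndex + height * xIndex;  // transposed destination element
  for (int i = 0; i < T_TILE_DIM; i += T_BLOCK_ROWS)
    odata[index_out + i] = idata[index_in + i * width];
}

void run_transpose(float* d_out, const float* d_in) {
  dim3 block(T_TILE_DIM, T_BLOCK_ROWS);  // blockDim = [4,2]
  dim3 grid(2, 2);                       // gridDim  = [2,2], so 2 * 4 = 8 = WIDTH = HEIGHT
  transpose_plain<<<grid, block>>>(d_out, d_in, 8, 8);
}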
ae38f707293cd8e45f0148a72cd30191d9b59839.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <assert.h> #include "SphereCreator.h" #include "AleaTools.h" #include "MathTools.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Constructeur *| \*-------------------------------------*/ SphereCreator::SphereCreator(int nbSpheres, int w, int h, int bord) { assert(nbSpheres >= 4); // Inputs this->nbSpheres = nbSpheres; this->w = w; this->h = h; this->bord = bord; // Tools this->tabSphere = new Sphere[nbSpheres]; // required default constructeur of type : sphere(void) createSphereAll(); } SphereCreator::~SphereCreator() { delete[] tabSphere; } /*--------------------------------------*\ |* Methode *| \*-------------------------------------*/ Sphere* SphereCreator::getTabSphere() { return tabSphere; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ void SphereCreator::createSphereAll(void) { createSphereBack(); createSphereFront(); createSphereAleatoire(); } /** * La premiere, centrée, très grande, au fond */ void SphereCreator::createSphereBack(void) { float3 centre; centre.x = h / 2; centre.y = w / 2; centre.z = ZMAX * 2; // plus grand que ZMAX float rayon = ::min(w / 2, h / 2); float hue01 = 1; tabSphere[0] = Sphere(centre, rayon, hue01); } /** * Les dernieres, centrées, très petite, très proche */ void SphereCreator::createSphereFront(void) { float hue01 = 1; float rayon = ::min(w / 10, h / 10); // disons float z = ZMIN - (rayon + rayonMax()); float dhue = 0.2; float dz = 40; float drayon = dz / 2; for (int i = nbSpheres - 3; i < nbSpheres; i++) { float3 centre; z -= dz; rayon -= drayon; hue01 -= dhue; centre.x = h / 2; centre.y = w / 2; centre.z = z; tabSphere[i] = Sphere(centre, rayon, hue01); // Warning : sphere stack copier dans sphere heap, ok car pas ptr dans Sphere } } /** * Toute sauf la premiere */ void SphereCreator::createSphereAleatoire(void) { AleaTools aleaTools = AleaTools(); for (int i = 1; i < nbSpheres - 3; i++) { float3 centre; centre.x = aleaTools.uniformeAB(0 + bord, h - bord); centre.y = aleaTools.uniformeAB(0 + bord, w - bord); centre.z = aleaTools.uniformeAB(ZMIN, ZMAX); float rayon = aleaTools.uniformeAB(20, rayonMax()); float hue01 = aleaTools.uniforme01(); tabSphere[i] = Sphere(centre, rayon, hue01); // Warning : sphere stack copier dans sphere heap, ok car pas ptr dans Sphere } } int SphereCreator::rayonMax() { return w / 10; } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
ae38f707293cd8e45f0148a72cd30191d9b59839.cu
#include <iostream> #include <assert.h> #include "SphereCreator.h" #include "AleaTools.h" #include "MathTools.h" using std::cout; using std::endl; /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Constructeur *| \*-------------------------------------*/ SphereCreator::SphereCreator(int nbSpheres, int w, int h, int bord) { assert(nbSpheres >= 4); // Inputs this->nbSpheres = nbSpheres; this->w = w; this->h = h; this->bord = bord; // Tools this->tabSphere = new Sphere[nbSpheres]; // required default constructeur of type : sphere(void) createSphereAll(); } SphereCreator::~SphereCreator() { delete[] tabSphere; } /*--------------------------------------*\ |* Methode *| \*-------------------------------------*/ Sphere* SphereCreator::getTabSphere() { return tabSphere; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ void SphereCreator::createSphereAll(void) { createSphereBack(); createSphereFront(); createSphereAleatoire(); } /** * La premiere, centrée, très grande, au fond */ void SphereCreator::createSphereBack(void) { float3 centre; centre.x = h / 2; centre.y = w / 2; centre.z = ZMAX * 2; // plus grand que ZMAX float rayon = std::min(w / 2, h / 2); float hue01 = 1; tabSphere[0] = Sphere(centre, rayon, hue01); } /** * Les dernieres, centrées, très petite, très proche */ void SphereCreator::createSphereFront(void) { float hue01 = 1; float rayon = std::min(w / 10, h / 10); // disons float z = ZMIN - (rayon + rayonMax()); float dhue = 0.2; float dz = 40; float drayon = dz / 2; for (int i = nbSpheres - 3; i < nbSpheres; i++) { float3 centre; z -= dz; rayon -= drayon; hue01 -= dhue; centre.x = h / 2; centre.y = w / 2; centre.z = z; tabSphere[i] = Sphere(centre, rayon, hue01); // Warning : sphere stack copier dans sphere heap, ok car pas ptr dans Sphere } } /** * Toute sauf la premiere */ void SphereCreator::createSphereAleatoire(void) { AleaTools aleaTools = AleaTools(); for (int i = 1; i < nbSpheres - 3; i++) { float3 centre; centre.x = aleaTools.uniformeAB(0 + bord, h - bord); centre.y = aleaTools.uniformeAB(0 + bord, w - bord); centre.z = aleaTools.uniformeAB(ZMIN, ZMAX); float rayon = aleaTools.uniformeAB(20, rayonMax()); float hue01 = aleaTools.uniforme01(); tabSphere[i] = Sphere(centre, rayon, hue01); // Warning : sphere stack copier dans sphere heap, ok car pas ptr dans Sphere } } int SphereCreator::rayonMax() { return w / 10; } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
49b0b42adeaa2743b1ce3637e35784377ed706c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by LBY, LY // ------------------------------------------------------------------ #include <cfloat> #include "caffe/layers/point_pooling_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void MaxPointPoolForward(const int nthreads, const Dtype* bottom_data, const int ncls, int channels, const int height, const int width, const int* cls_ch, const Dtype spatial_scale, const Dtype* bottom_ids, const Dtype* bottom_points, const Dtype* bottom_points_conf, Dtype* top_data, int* argmax_data, const bool use_valid_channel, const Dtype conf_th) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, cls, 1, 1) is an element in the pooled output, n represents roi id int cls = index % ncls; int n = index / ncls; cls_ch += cls * 2; argmax_data += n * channels; bottom_ids += n; int roi_batch_ind = bottom_ids[0]; int ch_len = cls_ch[1] - cls_ch[0] + 1; for (int ch = cls_ch[0]; ch <= cls_ch[1]; ch++) { argmax_data[ch] = -1; const Dtype* feat_map = bottom_data + (roi_batch_ind * channels + ch) * height * width; const Dtype* pnt = bottom_points + (n * channels + ch) * 4; const Dtype* conf = bottom_points_conf + n * channels + ch; if (use_valid_channel) { bool is_valid = conf[0] > conf_th; if (!is_valid) { ch_len--; continue; // the point is absent } } int x1 = floor(pnt[0] * spatial_scale); int y1 = floor(pnt[1] * spatial_scale); int x2 = ceil(pnt[2] * spatial_scale); int y2 = ceil(pnt[3] * spatial_scale); x1 = min(max(x1, 0), width - 1); y1 = min(max(y1, 0), height - 1); x2 = min(max(x2, 0), width - 1); y2 = min(max(y2, 0), height - 1); Dtype maxval = -FLT_MAX; for (int h=y1; h<=y2; h++) { for (int w=x1; w<=x2; w++) { int ind = h * width + w; if (feat_map[ind] > maxval) { maxval = feat_map[ind]; argmax_data[ch] = ind; } } } if (!use_valid_channel) // use all channels, use confidence as weight maxval *= conf[0]; top_data[index] += maxval; } if (ch_len > 0) top_data[index] /= ch_len; } } template <typename Dtype> __global__ void AvePointPoolForward(const int nthreads, const Dtype* bottom_data, const int ncls, int channels, const int height, const int width, const int* cls_ch, const Dtype spatial_scale, const Dtype* bottom_ids, const Dtype* bottom_points, const Dtype* bottom_points_conf, Dtype* top_data, const bool use_valid_channel, const Dtype conf_th) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, cls, 1, 1) is an element in the pooled output, n represents roi id int cls = index % ncls; int n = index / ncls; const int* cls_channel = cls_ch + cls * 2; int roi_batch_ind = bottom_ids[n]; int ch_len = cls_channel[1] - cls_channel[0] + 1; for (int ch = cls_channel[0]; ch <= cls_channel[1]; ch++) { const Dtype* feat_map = bottom_data + (roi_batch_ind * channels + ch) * height * width; const Dtype* pnt = bottom_points + (n * channels + ch) * 4; const Dtype* conf = bottom_points_conf + n * channels + ch; if (use_valid_channel) { if (conf[0] < conf_th) { ch_len--; continue; // the point is absent } } int x1 = floor(pnt[0] * spatial_scale); int y1 = floor(pnt[1] * spatial_scale); int x2 = ceil(pnt[2] * spatial_scale); int y2 = ceil(pnt[3] * spatial_scale); x1 = min(max(x1, 0), width - 1); y1 = min(max(y1, 0), height - 1); x2 = min(max(x2, 0), width - 1); y2 = 
min(max(y2, 0), height - 1); Dtype avgval = 0; for (int h=y1; h<=y2; h++) { for (int w=x1; w<=x2; w++) { int ind = h * width + w; avgval += feat_map[ind]; } } if (!use_valid_channel) { // use all channels, use confidence as weight avgval *= conf[0]; } top_data[index] += avgval / (y2 - y1 + 1) / (x2 - x1 + 1); } if (ch_len > 0) { top_data[index] /= ch_len; } } } template <typename Dtype> void PointPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_ids = bottom[1]->gpu_data(); // n_roi * 1 * 1 * 1 const Dtype* bottom_points = bottom[2]->gpu_data(); // n_roi * all_pnt_num * 4 * 1 const Dtype* bottom_points_conf = bottom[3]->gpu_data(); // n_roi * all_pnt_num * 1 * 1 const int* cls_ch = class_channel_.gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); // NOLINT_NEXT_LINE(whitespace/operators) if (use_maxpool_) hipLaunchKernelGGL(( MaxPointPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, ncls_, channels_, height_, width_, cls_ch, spatial_scale_, bottom_ids, bottom_points, bottom_points_conf, top_data, argmax_data, use_valid_channel_, conf_th_); else hipLaunchKernelGGL(( AvePointPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, ncls_, channels_, height_, width_, cls_ch, spatial_scale_, bottom_ids, bottom_points, bottom_points_conf, top_data, use_valid_channel_, conf_th_); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPointPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const int ncls, const int channels, const int height, const int width, const int* cls_ch, const Dtype spatial_scale, Dtype* bottom_diff, const Dtype* bottom_ids, const Dtype* bottom_points, const Dtype* bottom_points_conf, const bool use_valid_channel, const Dtype conf_th) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, cls, 1, 1) is an element in the pooled output, n represents roi id int cls = index % ncls; int n = index / ncls; cls_ch += cls * 2; argmax_data += n * channels; bottom_ids += n; int roi_batch_ind = bottom_ids[0]; int ch_len = cls_ch[1] - cls_ch[0] + 1; if (use_valid_channel) { for (int ch = cls_ch[0]; ch <= cls_ch[1]; ch++) { const Dtype* conf = bottom_points_conf + n * channels + ch; bool is_valid = conf[0] > conf_th; if (!is_valid) { ch_len--; } } } for (int ch = cls_ch[0]; ch <= cls_ch[1]; ch++) { Dtype* diff = bottom_diff + (roi_batch_ind * channels + ch) * height * width; int ind = argmax_data[ch]; if (ind > -1) { if (use_valid_channel) { diff[ind] += top_diff[index] / ch_len; } else { const Dtype* conf = bottom_points_conf + n * channels + ch; diff[ind] += top_diff[index] * conf[0] / ch_len; } } } } } template <typename Dtype> __global__ void AvePointPoolBackward(const int nthreads, const Dtype* top_diff, const int num_rois, const int ncls, const int channels, const int height, const int width, const int* cls_ch, const Dtype spatial_scale, Dtype* bottom_diff, const Dtype* bottom_ids, const Dtype* bottom_points, const Dtype* bottom_points_conf, const bool use_valid_channel, const Dtype conf_th) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, cls, 1, 1) is an element in the pooled output, n represents roi id int cls = index % ncls; int n = index / ncls; const int* cls_channel = 
cls_ch + cls * 2; int roi_batch_ind = bottom_ids[n]; int ch_len = cls_channel[1] - cls_channel[0] + 1; if (use_valid_channel) { for (int ch = cls_channel[0]; ch <= cls_channel[1]; ch++) { const Dtype* conf = bottom_points_conf + n * channels + ch; if (conf[0] < conf_th) { ch_len--; } } } for (int ch = cls_channel[0]; ch <= cls_channel[1]; ch++) { Dtype* diff = bottom_diff + (roi_batch_ind * channels + ch) * height * width; const Dtype* pnt = bottom_points + (n * channels + ch) * 4; const Dtype* conf = bottom_points_conf + n * channels + ch; if (use_valid_channel) { if (conf[0] < conf_th) { continue; // the point is absent } } int x1 = floor(pnt[0] * spatial_scale); int y1 = floor(pnt[1] * spatial_scale); int x2 = ceil(pnt[2] * spatial_scale); int y2 = ceil(pnt[3] * spatial_scale); x1 = min(max(x1, 0), width - 1); y1 = min(max(y1, 0), height - 1); x2 = min(max(x2, 0), width - 1); y2 = min(max(y2, 0), height - 1); for (int h=y1; h<=y2; h++) { for (int w=x1; w<=x2; w++) { int ind = h * width + w; if (use_valid_channel) { Dtype diff_val = top_diff[index] / ch_len /(y2 - y1 + 1) / (x2 - x1 + 1); caffe_gpu_atomic_add(diff_val, diff + ind); } else { Dtype diff_val = top_diff[index] * conf[0] / ch_len / (y2 - y1 + 1) / (x2 - x1 + 1); caffe_gpu_atomic_add(diff_val, diff + ind); } } } } } } template <typename Dtype> void PointPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_ids = bottom[1]->gpu_data(); // n_roi * 1 * 1 * 1 const Dtype* bottom_points = bottom[2]->gpu_data(); // n_roi * all_pnt_num * 4 * 1 const Dtype* bottom_points_conf = bottom[3]->gpu_data(); // n_roi * all_pnt_num * 1 * 1 const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = top[0]->count(); caffe_gpu_set(bottom[0]->count(), Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); const int* cls_ch = class_channel_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) if (use_maxpool_) hipLaunchKernelGGL(( MaxPointPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data, top[0]->num(), ncls_, channels_, height_, width_, cls_ch, spatial_scale_, bottom_diff, bottom_ids, bottom_points, bottom_points_conf, use_valid_channel_, conf_th_); else hipLaunchKernelGGL(( AvePointPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), ncls_, channels_, height_, width_, cls_ch, spatial_scale_, bottom_diff, bottom_ids, bottom_points, bottom_points_conf, use_valid_channel_, conf_th_); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PointPoolingLayer); } // namespace caffe
49b0b42adeaa2743b1ce3637e35784377ed706c5.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by LBY, LY // ------------------------------------------------------------------ #include <cfloat> #include "caffe/layers/point_pooling_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void MaxPointPoolForward(const int nthreads, const Dtype* bottom_data, const int ncls, int channels, const int height, const int width, const int* cls_ch, const Dtype spatial_scale, const Dtype* bottom_ids, const Dtype* bottom_points, const Dtype* bottom_points_conf, Dtype* top_data, int* argmax_data, const bool use_valid_channel, const Dtype conf_th) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, cls, 1, 1) is an element in the pooled output, n represents roi id int cls = index % ncls; int n = index / ncls; cls_ch += cls * 2; argmax_data += n * channels; bottom_ids += n; int roi_batch_ind = bottom_ids[0]; int ch_len = cls_ch[1] - cls_ch[0] + 1; for (int ch = cls_ch[0]; ch <= cls_ch[1]; ch++) { argmax_data[ch] = -1; const Dtype* feat_map = bottom_data + (roi_batch_ind * channels + ch) * height * width; const Dtype* pnt = bottom_points + (n * channels + ch) * 4; const Dtype* conf = bottom_points_conf + n * channels + ch; if (use_valid_channel) { bool is_valid = conf[0] > conf_th; if (!is_valid) { ch_len--; continue; // the point is absent } } int x1 = floor(pnt[0] * spatial_scale); int y1 = floor(pnt[1] * spatial_scale); int x2 = ceil(pnt[2] * spatial_scale); int y2 = ceil(pnt[3] * spatial_scale); x1 = min(max(x1, 0), width - 1); y1 = min(max(y1, 0), height - 1); x2 = min(max(x2, 0), width - 1); y2 = min(max(y2, 0), height - 1); Dtype maxval = -FLT_MAX; for (int h=y1; h<=y2; h++) { for (int w=x1; w<=x2; w++) { int ind = h * width + w; if (feat_map[ind] > maxval) { maxval = feat_map[ind]; argmax_data[ch] = ind; } } } if (!use_valid_channel) // use all channels, use confidence as weight maxval *= conf[0]; top_data[index] += maxval; } if (ch_len > 0) top_data[index] /= ch_len; } } template <typename Dtype> __global__ void AvePointPoolForward(const int nthreads, const Dtype* bottom_data, const int ncls, int channels, const int height, const int width, const int* cls_ch, const Dtype spatial_scale, const Dtype* bottom_ids, const Dtype* bottom_points, const Dtype* bottom_points_conf, Dtype* top_data, const bool use_valid_channel, const Dtype conf_th) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, cls, 1, 1) is an element in the pooled output, n represents roi id int cls = index % ncls; int n = index / ncls; const int* cls_channel = cls_ch + cls * 2; int roi_batch_ind = bottom_ids[n]; int ch_len = cls_channel[1] - cls_channel[0] + 1; for (int ch = cls_channel[0]; ch <= cls_channel[1]; ch++) { const Dtype* feat_map = bottom_data + (roi_batch_ind * channels + ch) * height * width; const Dtype* pnt = bottom_points + (n * channels + ch) * 4; const Dtype* conf = bottom_points_conf + n * channels + ch; if (use_valid_channel) { if (conf[0] < conf_th) { ch_len--; continue; // the point is absent } } int x1 = floor(pnt[0] * spatial_scale); int y1 = floor(pnt[1] * spatial_scale); int x2 = ceil(pnt[2] * spatial_scale); int y2 = ceil(pnt[3] * spatial_scale); x1 = min(max(x1, 0), width - 1); y1 = min(max(y1, 0), height - 1); x2 = min(max(x2, 0), width - 1); y2 = min(max(y2, 0), height - 1); Dtype avgval = 0; for (int h=y1; h<=y2; h++) { for (int w=x1; w<=x2; 
w++) { int ind = h * width + w; avgval += feat_map[ind]; } } if (!use_valid_channel) { // use all channels, use confidence as weight avgval *= conf[0]; } top_data[index] += avgval / (y2 - y1 + 1) / (x2 - x1 + 1); } if (ch_len > 0) { top_data[index] /= ch_len; } } } template <typename Dtype> void PointPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_ids = bottom[1]->gpu_data(); // n_roi * 1 * 1 * 1 const Dtype* bottom_points = bottom[2]->gpu_data(); // n_roi * all_pnt_num * 4 * 1 const Dtype* bottom_points_conf = bottom[3]->gpu_data(); // n_roi * all_pnt_num * 1 * 1 const int* cls_ch = class_channel_.gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); // NOLINT_NEXT_LINE(whitespace/operators) if (use_maxpool_) MaxPointPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, ncls_, channels_, height_, width_, cls_ch, spatial_scale_, bottom_ids, bottom_points, bottom_points_conf, top_data, argmax_data, use_valid_channel_, conf_th_); else AvePointPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, ncls_, channels_, height_, width_, cls_ch, spatial_scale_, bottom_ids, bottom_points, bottom_points_conf, top_data, use_valid_channel_, conf_th_); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPointPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const int ncls, const int channels, const int height, const int width, const int* cls_ch, const Dtype spatial_scale, Dtype* bottom_diff, const Dtype* bottom_ids, const Dtype* bottom_points, const Dtype* bottom_points_conf, const bool use_valid_channel, const Dtype conf_th) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, cls, 1, 1) is an element in the pooled output, n represents roi id int cls = index % ncls; int n = index / ncls; cls_ch += cls * 2; argmax_data += n * channels; bottom_ids += n; int roi_batch_ind = bottom_ids[0]; int ch_len = cls_ch[1] - cls_ch[0] + 1; if (use_valid_channel) { for (int ch = cls_ch[0]; ch <= cls_ch[1]; ch++) { const Dtype* conf = bottom_points_conf + n * channels + ch; bool is_valid = conf[0] > conf_th; if (!is_valid) { ch_len--; } } } for (int ch = cls_ch[0]; ch <= cls_ch[1]; ch++) { Dtype* diff = bottom_diff + (roi_batch_ind * channels + ch) * height * width; int ind = argmax_data[ch]; if (ind > -1) { if (use_valid_channel) { diff[ind] += top_diff[index] / ch_len; } else { const Dtype* conf = bottom_points_conf + n * channels + ch; diff[ind] += top_diff[index] * conf[0] / ch_len; } } } } } template <typename Dtype> __global__ void AvePointPoolBackward(const int nthreads, const Dtype* top_diff, const int num_rois, const int ncls, const int channels, const int height, const int width, const int* cls_ch, const Dtype spatial_scale, Dtype* bottom_diff, const Dtype* bottom_ids, const Dtype* bottom_points, const Dtype* bottom_points_conf, const bool use_valid_channel, const Dtype conf_th) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, cls, 1, 1) is an element in the pooled output, n represents roi id int cls = index % ncls; int n = index / ncls; const int* cls_channel = cls_ch + cls * 2; int roi_batch_ind = bottom_ids[n]; int ch_len = cls_channel[1] - cls_channel[0] + 1; if (use_valid_channel) { for (int ch = cls_channel[0]; ch <= 
cls_channel[1]; ch++) { const Dtype* conf = bottom_points_conf + n * channels + ch; if (conf[0] < conf_th) { ch_len--; } } } for (int ch = cls_channel[0]; ch <= cls_channel[1]; ch++) { Dtype* diff = bottom_diff + (roi_batch_ind * channels + ch) * height * width; const Dtype* pnt = bottom_points + (n * channels + ch) * 4; const Dtype* conf = bottom_points_conf + n * channels + ch; if (use_valid_channel) { if (conf[0] < conf_th) { continue; // the point is absent } } int x1 = floor(pnt[0] * spatial_scale); int y1 = floor(pnt[1] * spatial_scale); int x2 = ceil(pnt[2] * spatial_scale); int y2 = ceil(pnt[3] * spatial_scale); x1 = min(max(x1, 0), width - 1); y1 = min(max(y1, 0), height - 1); x2 = min(max(x2, 0), width - 1); y2 = min(max(y2, 0), height - 1); for (int h=y1; h<=y2; h++) { for (int w=x1; w<=x2; w++) { int ind = h * width + w; if (use_valid_channel) { Dtype diff_val = top_diff[index] / ch_len /(y2 - y1 + 1) / (x2 - x1 + 1); caffe_gpu_atomic_add(diff_val, diff + ind); } else { Dtype diff_val = top_diff[index] * conf[0] / ch_len / (y2 - y1 + 1) / (x2 - x1 + 1); caffe_gpu_atomic_add(diff_val, diff + ind); } } } } } } template <typename Dtype> void PointPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_ids = bottom[1]->gpu_data(); // n_roi * 1 * 1 * 1 const Dtype* bottom_points = bottom[2]->gpu_data(); // n_roi * all_pnt_num * 4 * 1 const Dtype* bottom_points_conf = bottom[3]->gpu_data(); // n_roi * all_pnt_num * 1 * 1 const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = top[0]->count(); caffe_gpu_set(bottom[0]->count(), Dtype(0.), bottom_diff); const int* argmax_data = max_idx_.gpu_data(); const int* cls_ch = class_channel_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) if (use_maxpool_) MaxPointPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data, top[0]->num(), ncls_, channels_, height_, width_, cls_ch, spatial_scale_, bottom_diff, bottom_ids, bottom_points, bottom_points_conf, use_valid_channel_, conf_th_); else AvePointPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), ncls_, channels_, height_, width_, cls_ch, spatial_scale_, bottom_diff, bottom_ids, bottom_points, bottom_points_conf, use_valid_channel_, conf_th_); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PointPoolingLayer); } // namespace caffe
d1989101446f1ef8bef0fa7a3fd47ea74b2567ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ inline void bres_calc_gpu(const float *x1, const float *x2, const float *q1, const float *adt1, float *res1, const int *bound) { float dx,dy,mu, ri, p1,vol1, p2,vol2, f; dx = x1[0] - x2[0]; dy = x1[1] - x2[1]; ri = 1.0f/q1[0]; p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2])); if (*bound==1) { res1[1] += + p1*dy; res1[2] += - p1*dx; } else { vol1 = ri*(q1[1]*dy - q1[2]*dx); ri = 1.0f/qinf[0]; p2 = gm1*(qinf[3]-0.5f*ri*(qinf[1]*qinf[1]+qinf[2]*qinf[2])); vol2 = ri*(qinf[1]*dy - qinf[2]*dx); mu = (*adt1)*eps; f = 0.5f*(vol1* q1[0] + vol2* qinf[0] ) + mu*(q1[0]-qinf[0]); res1[0] += f; f = 0.5f*(vol1* q1[1] + p1*dy + vol2* qinf[1] + p2*dy) + mu*(q1[1]-qinf[1]); res1[1] += f; f = 0.5f*(vol1* q1[2] - p1*dx + vol2* qinf[2] - p2*dx) + mu*(q1[2]-qinf[2]); res1[2] += f; f = 0.5f*(vol1*(q1[3]+p1) + vol2*(qinf[3]+p2) ) + mu*(q1[3]-qinf[3]); res1[3] += f; } } // CUDA kernel function __global__ void op_cuda_bres_calc( const float *__restrict ind_arg0, const float *__restrict ind_arg1, const float *__restrict ind_arg2, float *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, const int *__restrict arg5, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg4_l[4]; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelems2; n = n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; int map2idx; if (n<nelem) { //initialise local variables for ( int d=0; d<4; d++ ){ arg4_l[d] = ZERO_float; } map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat2Map[n + offset_b + set_size * 0]; //user-supplied kernel call bres_calc_gpu(ind_arg0+map0idx*2, ind_arg0+map1idx*2, ind_arg1+map2idx*4, ind_arg2+map2idx*1, arg4_l, arg5+(n+offset_b)*1); col2 = colors[n+offset_b]; } //store local variables for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg4_l[0] += ind_arg3[0+map2idx*4]; arg4_l[1] += ind_arg3[1+map2idx*4]; arg4_l[2] += ind_arg3[2+map2idx*4]; arg4_l[3] += ind_arg3[3+map2idx*4]; ind_arg3[0+map2idx*4] = arg4_l[0]; ind_arg3[1+map2idx*4] = arg4_l[1]; ind_arg3[2+map2idx*4] = arg4_l[2]; ind_arg3[3+map2idx*4] = arg4_l[3]; } __syncthreads(); } } } //GPU host stub function void op_par_loop_bres_calc_gpu(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(3); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[3].name = name; OP_kernels[3].count += 1; if (OP_kernels[3].count==1) op_register_strides(); int ninds = 4; int inds[6] = {0,0,1,2,3,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: bres_calc\n"); } //get plan #ifdef OP_PART_SIZE_3 int part_size = OP_PART_SIZE_3; #else int 
part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_3 int nthread = OP_BLOCK_SIZE_3; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks),dim3(nthread), 0, 0, (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg3.data_d, (float *)arg4.data_d, arg0.map_data_d, arg2.map_data_d, (int*)arg5.data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[3].transfer += Plan->transfer; OP_kernels[3].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[3].time += wall_t2 - wall_t1; } void op_par_loop_bres_calc_cpu(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5); //GPU host stub function #if OP_HYBRID_GPU void op_par_loop_bres_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5){ if (OP_hybrid_gpu) { op_par_loop_bres_calc_gpu(name, set, arg0, arg1, arg2, arg3, arg4, arg5); }else{ op_par_loop_bres_calc_cpu(name, set, arg0, arg1, arg2, arg3, arg4, arg5); } } #else void op_par_loop_bres_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5){ op_par_loop_bres_calc_gpu(name, set, arg0, arg1, arg2, arg3, arg4, arg5); } #endif //OP_HYBRID_GPU
d1989101446f1ef8bef0fa7a3fd47ea74b2567ea.cu
// // auto-generated by op2.py // //user function __device__ inline void bres_calc_gpu(const float *x1, const float *x2, const float *q1, const float *adt1, float *res1, const int *bound) { float dx,dy,mu, ri, p1,vol1, p2,vol2, f; dx = x1[0] - x2[0]; dy = x1[1] - x2[1]; ri = 1.0f/q1[0]; p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2])); if (*bound==1) { res1[1] += + p1*dy; res1[2] += - p1*dx; } else { vol1 = ri*(q1[1]*dy - q1[2]*dx); ri = 1.0f/qinf[0]; p2 = gm1*(qinf[3]-0.5f*ri*(qinf[1]*qinf[1]+qinf[2]*qinf[2])); vol2 = ri*(qinf[1]*dy - qinf[2]*dx); mu = (*adt1)*eps; f = 0.5f*(vol1* q1[0] + vol2* qinf[0] ) + mu*(q1[0]-qinf[0]); res1[0] += f; f = 0.5f*(vol1* q1[1] + p1*dy + vol2* qinf[1] + p2*dy) + mu*(q1[1]-qinf[1]); res1[1] += f; f = 0.5f*(vol1* q1[2] - p1*dx + vol2* qinf[2] - p2*dx) + mu*(q1[2]-qinf[2]); res1[2] += f; f = 0.5f*(vol1*(q1[3]+p1) + vol2*(qinf[3]+p2) ) + mu*(q1[3]-qinf[3]); res1[3] += f; } } // CUDA kernel function __global__ void op_cuda_bres_calc( const float *__restrict ind_arg0, const float *__restrict ind_arg1, const float *__restrict ind_arg2, float *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, const int *__restrict arg5, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg4_l[4]; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelems2; n = n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; int map2idx; if (n<nelem) { //initialise local variables for ( int d=0; d<4; d++ ){ arg4_l[d] = ZERO_float; } map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat2Map[n + offset_b + set_size * 0]; //user-supplied kernel call bres_calc_gpu(ind_arg0+map0idx*2, ind_arg0+map1idx*2, ind_arg1+map2idx*4, ind_arg2+map2idx*1, arg4_l, arg5+(n+offset_b)*1); col2 = colors[n+offset_b]; } //store local variables for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg4_l[0] += ind_arg3[0+map2idx*4]; arg4_l[1] += ind_arg3[1+map2idx*4]; arg4_l[2] += ind_arg3[2+map2idx*4]; arg4_l[3] += ind_arg3[3+map2idx*4]; ind_arg3[0+map2idx*4] = arg4_l[0]; ind_arg3[1+map2idx*4] = arg4_l[1]; ind_arg3[2+map2idx*4] = arg4_l[2]; ind_arg3[3+map2idx*4] = arg4_l[3]; } __syncthreads(); } } } //GPU host stub function void op_par_loop_bres_calc_gpu(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(3); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[3].name = name; OP_kernels[3].count += 1; if (OP_kernels[3].count==1) op_register_strides(); int ninds = 4; int inds[6] = {0,0,1,2,3,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: bres_calc\n"); } //get plan #ifdef OP_PART_SIZE_3 int part_size = OP_PART_SIZE_3; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); 
if (set->size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_3 int nthread = OP_BLOCK_SIZE_3; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { op_cuda_bres_calc<<<nblocks,nthread>>>( (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg3.data_d, (float *)arg4.data_d, arg0.map_data_d, arg2.map_data_d, (int*)arg5.data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[3].transfer += Plan->transfer; OP_kernels[3].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[3].time += wall_t2 - wall_t1; } void op_par_loop_bres_calc_cpu(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5); //GPU host stub function #if OP_HYBRID_GPU void op_par_loop_bres_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5){ if (OP_hybrid_gpu) { op_par_loop_bres_calc_gpu(name, set, arg0, arg1, arg2, arg3, arg4, arg5); }else{ op_par_loop_bres_calc_cpu(name, set, arg0, arg1, arg2, arg3, arg4, arg5); } } #else void op_par_loop_bres_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5){ op_par_loop_bres_calc_gpu(name, set, arg0, arg1, arg2, arg3, arg4, arg5); } #endif //OP_HYBRID_GPU
51567738f078b355fa2d1b35da195e58619e4dc1.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************** C-DAC Tech Workshop : hyPACK-2013 October 15-18, 2013 Example : multipleKernels-multiGPU-streams-matrix-matrix-comp.cu url : http://cdac.in/index.aspx?id=ev_hpc_gpu-comp-nvidia-cuda-streams#top Objective : The objective is to demonstrate use of CUDA Synchronous and CUDA Asynchronous APIs with CUDA streams for simple addition of two nonsquare matrices & compare the execution time on multiGPU system. Matrix-Matrix Addition kernel is domonstrated Input : Number of kernels(optional, default is set to 16) Output : Execution-Type(Syn,Asyn),Execution Time in sec Relative-Error Created : August-2013 E-mail : [email protected] ********************************************************************************/ /* inclusion of header file that contains necessary declarions */ #include <pthread.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <time.h> #include <math.h> #include <assert.h> #define EPS 1.0e-14 /* threshhold aprrox epsilion value */ #define BLOCK_SIZE 8 #define NUMROWS 128 #define NUMCOLS 64 int numOfDevicesAvailable; long int hA, wA, hB, wB ,size; //holds height and width for MatrixA and MatrixB double *hAddMatMatA , *hAddMatMatB, *hAddMatMatC; // holds host matrices int nkernels; // holds total number of concurrent kernels /* function prototypes */ double matMatAddCheckResult (double *hAddMatMatA,double *hAddMatMatB,double *output,long int numRows,long int numCols); void memoryAlloc(long int hA, long int wA,long int hB, long int wB); /* Macro to check for correctness of CUDA API */ #define CUDA_SAFE_CALL(call){\ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString( err) ); \ exit(-1); \ }}\ /* * Fill in the matrix/vector with double precision values */ void fillInData(double* vec,int size) { int ind; for(ind=0;ind<size;ind++) vec[ind]=drand48() ; } /* *check mem error */ void memError(char *arrayname, char *benchmark, int len, char *type) { printf("\nMemory not sufficient to allocate for array %s\n\tBenchmark : %s \n\tMemory requested = %d number of %s elements\n",arrayname, benchmark, len, type); printf("\n\tAborting\n\n"); exit(-1); } /* *checl grid and block dimensions */ void checkBlockGridDim(hipDeviceProp_t devProp,dim3 blockDim,dim3 gridDim) { if( blockDim.x >= devProp.maxThreadsDim[0] || blockDim.y >= devProp.maxThreadsDim[1] || blockDim.z >= devProp.maxThreadsDim[2] ) { printf("\nBlock Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]); exit(-1); } if( gridDim.x >= devProp.maxGridSize[0] || gridDim.y >= devProp.maxGridSize[1] || gridDim.z >= devProp.maxGridSize[2] ) { printf("\nGrid Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]); exit(-1); } } /***************************************** * Matrix Matrix Addition ******************************************/ /* __global__ void kernelMatMatAdd(double *dInMatA, double *dInMatB,double *dInMatC, int matRowColSize, int threadDim) { int tidx = threadIdx.x; int tidy = threadIdx.y; int tindex = (threadDim * tidx) + tidy; // get thread index int maxNumThread = threadDim * threadDim; int pass = 0; int rowCount ; int curColInd ; while( (curColInd = (tindex + maxNumThread * pass)) < matRowColSize ) { for( rowCount = 0; rowCount < matRowColSize; 
rowCount++) { dInMatC[curColInd * matRowColSize + rowCount] = dInMatA[curColInd * matRowColSize + rowCount] + dInMatB[curColInd * matRowColSize + rowCount]; } pass++; // move to next column } __syncthreads(); } end of Mat Mat Add device code */ __global__ void kernelMatMatAdd(double *dInMatA, double *dInMatB,double *dInMatC, long int matRowSize, long int matColSize ,int threadDim) { int tidx = threadIdx.x; int tidy = threadIdx.y; int tindex = (threadDim * tidx) + tidy; // get thread index int maxNumThread = threadDim * threadDim; int pass = 0; int rowCount ; int curColInd ; while( (curColInd = (tindex + maxNumThread * pass)) < matColSize ) { for( rowCount = 0; rowCount < matRowSize; rowCount++) { dInMatC[curColInd * matRowSize + rowCount] = dInMatA[curColInd * matRowSize + rowCount] + dInMatB[curColInd * matRowSize + rowCount]; } pass++; // move to next column } __syncthreads(); }/* end of Mat Mat Add device code */ /*************************************************************** function to implement concurrent kernel execution ***************************************************************/ void funcAsynchConcurrentExec(double *dAddMatMatA, double *dAddMatMatB, double *dAddMatMatC,double *hAddMatMatA, double *hAddMatMatB, double *hAddMatMatC, int nkernels, int NSTREAM, hipStream_t *stream , long int hA, long int wA, long int hB, long int wB,hipDeviceProp_t deviceProp) { float elapsedTime; // holds timing variables hipError_t err; // holds error value /* create CUDA event handles */ hipEvent_t startEvent, stopEvent; CUDA_SAFE_CALL( hipEventCreate(&startEvent)); CUDA_SAFE_CALL( hipEventCreate(&stopEvent)); /* get all errors before kernel launch */ if ( err=hipGetLastError()) { printf(" File : %s , Line : %d , Error : %s \n",__FILE__, __LINE__, hipGetErrorString(err)); } /* define blocks and grids check grid and block dimension*/ dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); //threads per block dim3 dimGrid(1,1); //blocks per grid checkBlockGridDim(deviceProp,dimGrid, dimBlock); /* Asynchronous kernel execution */ hipEventRecord(startEvent); for( int i=0; i<nkernels; ++i) { /* mem copy from host to device asynchcronously */ CUDA_SAFE_CALL( hipMemcpyAsync(dAddMatMatA, hAddMatMatA, hA*wA*sizeof(double), hipMemcpyHostToDevice,stream[i])); CUDA_SAFE_CALL( hipMemcpyAsync(dAddMatMatB, hAddMatMatB, hB*wB*sizeof(double), hipMemcpyHostToDevice, stream[i])); CUDA_SAFE_CALL( hipMemcpyAsync(dAddMatMatC, hAddMatMatC, hA*wB*sizeof(double), hipMemcpyHostToDevice, stream[i])); } for( int i=0; i<nkernels; ++i) { // queue nkernels and record when they are done //kernelMatMatAdd<<<dimGrid, dimBlock, 0, stream[i]>>>(dAddMatMatA,dAddMatMatB, dAddMatMatC, SIZE,BLOCK_SIZE); hipLaunchKernelGGL(( kernelMatMatAdd), dim3(dimGrid), dim3(dimBlock), 0, stream[i], dAddMatMatA,dAddMatMatB, dAddMatMatC, NUMROWS,NUMCOLS,BLOCK_SIZE); } for( int i=0; i<nkernels; ++i) { /* copy output from device to host */ CUDA_SAFE_CALL( hipMemcpyAsync(hAddMatMatC, dAddMatMatC, hA*wB*sizeof(double), hipMemcpyDeviceToHost, stream[i])); } CUDA_SAFE_CALL( hipEventRecord(stopEvent)); CUDA_SAFE_CALL( hipEventSynchronize(stopEvent)); CUDA_SAFE_CALL( hipEventElapsedTime(&elapsedTime, startEvent, stopEvent)); /* get all errors from kernel launch */ if ( err=hipGetLastError()) { printf(" File : %s , Line : %d , Error : %s \n",__FILE__, __LINE__, hipGetErrorString(err)); } /* calculate measured time and gflops */ double tsecGpu; tsecGpu = (double) (elapsedTime * 1.0e-3); // converting to seconds from milliseconds /* check GPU results against CPU results */ 
double errorNorm = matMatAddCheckResult (hAddMatMatA,hAddMatMatB,hAddMatMatC,hA,wB); /* print output on screen */ printf("%s\t%f\t %e\t\n","Asynchronous Concurrent Execution",tsecGpu,errorNorm); /* relese GPU events */ hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } /************************************************************************ functions to execute multiple kernels without stream ************************************************************************/ void funcSynchExec(double *dAddMatMatA, double *dAddMatMatB, double *dAddMatMatC,double *hAddMatMatA, double *hAddMatMatB, double *hAddMatMatC, int nkernels,long int hA, long int wA, long int hB, long int wB, hipDeviceProp_t deviceProp) { float elapsedTime; // holds timing variables hipError_t err; // holds error value /* create CUDA event handles */ hipEvent_t startEvent, stopEvent; CUDA_SAFE_CALL( hipEventCreate(&startEvent)); CUDA_SAFE_CALL( hipEventCreate(&stopEvent)); /* get all errors before kernel launch */ if ( err=hipGetLastError()) { printf(" File : %s , Line : %d , Error : %s \n",__FILE__, __LINE__, hipGetErrorString(err)); } /* define blocks and grids check grid and block dimension*/ dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); //threads per block dim3 dimGrid(1,1); //blocks per grid checkBlockGridDim(deviceProp,dimGrid, dimBlock); /*Synchronous kernel execution */ hipEventRecord(startEvent, 0); for(int i=0;i<nkernels;i++) { /* mem copy from host to device asynchcronously */ CUDA_SAFE_CALL( hipMemcpy(dAddMatMatA, hAddMatMatA, hA*wA*sizeof(double), hipMemcpyHostToDevice)); CUDA_SAFE_CALL( hipMemcpy(dAddMatMatB, hAddMatMatB, hB*wB*sizeof(double), hipMemcpyHostToDevice)); CUDA_SAFE_CALL( hipMemcpy(dAddMatMatC, hAddMatMatC, hA*wB*sizeof(double), hipMemcpyHostToDevice)); } for( int i=0; i<nkernels; ++i) { // queue nkernels and record when they are done //kernelMatMatAdd<<<dimGrid, dimBlock>>>(dAddMatMatA,dAddMatMatB, dAddMatMatC, SIZE,BLOCK_SIZE); hipLaunchKernelGGL(( kernelMatMatAdd), dim3(dimGrid), dim3(dimBlock), 0, 0, dAddMatMatA,dAddMatMatB, dAddMatMatC, hA,wB,BLOCK_SIZE); } for( int i=0; i<nkernels; ++i) { /* copy output from device to host */ CUDA_SAFE_CALL( hipMemcpy(hAddMatMatC, dAddMatMatC, hA*wB*sizeof(double), hipMemcpyDeviceToHost)); } /* in this sample we just wait until the GPU is done */ CUDA_SAFE_CALL( hipEventRecord(stopEvent, 0) ); CUDA_SAFE_CALL( hipEventSynchronize(stopEvent) ); CUDA_SAFE_CALL( hipEventElapsedTime(&elapsedTime, startEvent, stopEvent) ); /* get all errors from kernel launch */ if ( err=hipGetLastError()) { printf(" File : %s , Line : %d , Error : %s \n",__FILE__, __LINE__, hipGetErrorString(err)); } /* calculate measured time and gflops */ double tsecGpu = (double) (elapsedTime * 1.0e-3); /* check CPU+GPU results against CPU results */ double errorNorm = matMatAddCheckResult (hAddMatMatA,hAddMatMatB,hAddMatMatC,hA,wB); /* print output on the screen */ printf("%s\t\t\t%f\t %e\t\n","Synchronous Execution",tsecGpu,errorNorm); /* release GPU event */ hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } /************************************************************ function to check the result with sequential result ***************************************************************/ double matMatAddCheckResult (double *hAddMatMatA,double *hAddMatMatB,double *outputGPU,long int numRows,long int numCols) { int j, flag=0; //Holds flag value double *outputCPU; //Holds sequential resultant output double errorNorm = 0.0; // HOlds Error norm value double eps=EPS; double relativeError=0.0; // 
Holds relative error assert((outputCPU = (double *)malloc( sizeof(double) * numRows*numCols))!=NULL); /*sequential Matrix Matrix Addition result*/ for( j=0 ; j<numRows*numCols ; j++) { outputCPU[j]= hAddMatMatA[j] + hAddMatMatB[j]; } /* check opencl result with sequential result*/ for( j=0 ; j < numRows*numCols ; j++) { if (fabs(outputCPU[j]) > fabs(outputGPU[j])) relativeError = fabs((outputCPU[j] - outputGPU[j]) / outputCPU[j]); else relativeError = fabs((outputGPU[j] - outputCPU[j]) / outputGPU[j]); if (relativeError > eps) { if(errorNorm < relativeError) { errorNorm = relativeError; flag=1; } } } if( flag == 1) { printf(" \n\t Results verfication : Failed"); printf(" \n\t Considered machine precision : %e", eps); printf(" \n\t Relative Error : %e", errorNorm); } if(flag==0) { } free(outputCPU); return errorNorm; } /* function to check device properties related to aynchronous execution */ void checkDeviceProperty(hipDeviceProp_t deviceProp) { //printf("\nDevice Used :\t %s",deviceProp.name); if( (deviceProp.concurrentKernels == 0 )) //check concurrent kernel support { printf("> GPU does not support concurrent kernel execution\n"); printf(" CUDA kernel runs will be serialized\n"); } if(deviceProp.asyncEngineCount == 0) //check concurrent data transfer support { printf("GPU does not support concurrent Data transer and overlaping of kernel execution & data transfer\n"); printf("Mem copy call will be blocking calls\n"); } } /* function to check for device availability */ void checkDeviceAvailability(int id) { hipError_t err; // holds error value err=hipSetDevice(id); //change this to set the code to another GPU if (err == hipErrorDevicesUnavailable) { printf("\ndevice %d Not available\n",id); exit(0); } } /* Function for memory allocation */ void memoryAlloc(long int hA, long int wA, long int hB,long int wB) { int size; /* memory allocate to matrices*/ CUDA_SAFE_CALL( hipHostMalloc((void**)&hAddMatMatA , hA * wA * sizeof(double))); CUDA_SAFE_CALL( hipHostMalloc((void**)&hAddMatMatB , hB * wB * sizeof(double))); CUDA_SAFE_CALL( hipHostMalloc((void**)&hAddMatMatC , hA * wB * sizeof(double))); /* initialize Matrices*/ fillInData(hAddMatMatA,hA*wA); fillInData(hAddMatMatB,hB*wB); for(int index = 0; index < hA*wB ; index++) hAddMatMatC[index] = 0; } /* Function to check command line arguments */ void check_cmdline_arg(int argc,char* argv[]) { switch(argc) { case 1: printf("\n Number of kernels not specified....default value will be taken\n"); nkernels = 16; break; case 2 : nkernels = atoi(argv[1]); // holds total number of concurrent kernels if(nkernels==0) { printf("\nWrong input....\n"); printf("\nUsage : <executable> [nkernels].........aborting \n"); exit(-1); } if(nkernels > 16) { printf("\n The maximum number of kernel launches that a device can execute concurrently is 16 \n"); printf("\n Kernels will may not be executed concurrently...... 
\n"); } break; default : printf("\n Invalid options...\n"); printf("\n Usage : <./exe> [nKernels] \n"); exit(-1); } } /* Thread function definition */ void* threadWork(int threadId) { double *dAddMatMatA, *dAddMatMatB, *dAddMatMatC; // holds device matrices hipDeviceProp_t deviceProp; hipStream_t *stream; // holds stream array int NSTREAM ,count,size ; // holds total number of streams NSTREAM = nkernels; checkDeviceAvailability(threadId); hipSetDevice(threadId); int device; hipGetDevice(&device); hipGetDeviceProperties(&deviceProp,device); /* call function to check device properties */ checkDeviceProperty(deviceProp); // function to check device properties size = hA * wA * sizeof(double); CUDA_SAFE_CALL( hipMalloc((void**) &dAddMatMatA, size)); /* allocate device memory*/ size = hB * wB * sizeof(double); CUDA_SAFE_CALL( hipMalloc((void**) &dAddMatMatB,size)); /* allocate device memory*/ size = hA * wB * sizeof(double); CUDA_SAFE_CALL( hipMalloc((void**) &dAddMatMatC,size)); for(count = 0 ; count < NSTREAM; count++) stream = (hipStream_t*) malloc(NSTREAM * sizeof(hipStream_t)); for(count = 0; count< NSTREAM; count++) CUDA_SAFE_CALL( hipStreamCreate(&(stream[count]))); /* print information on the screen */ printf("\n\tFor device %d : %s\n ",threadId,deviceProp.name); printf("\nNumber of kernels :\t %d", nkernels); printf("\nNOTE : TIME_SEC includes data transfer time from host to device, device to host and kernel time"); printf("\n\nExecution-Type\t\t\t\t Time_sec\t Relative-Error\n"); printf("======================================================================\n"); /* call function to execute Asynchronous kernels execution */ funcAsynchConcurrentExec(dAddMatMatA, dAddMatMatB, dAddMatMatC,hAddMatMatA, hAddMatMatB, hAddMatMatC, nkernels, NSTREAM, stream ,hA,wA,hB,wB,deviceProp); /* call function to execute synchronous kernels execution */ funcSynchExec(dAddMatMatA, dAddMatMatB, dAddMatMatC,hAddMatMatA, hAddMatMatB, hAddMatMatC, nkernels, hA,wA,hB,wB,deviceProp); printf("======================================================================\n"); /*********** Release all resources***************************/ /* destroy an array of stream handles */ for(count = 0; count< NSTREAM; count++) CUDA_SAFE_CALL( hipStreamDestroy((stream[count]))); hipFree(dAddMatMatA); hipFree(dAddMatMatB); hipFree(dAddMatMatC); return 0; } /***************************************************************************** main function ******************************************************************************/ int main(int argc, char *argv[]) { pthread_t *threads; int threadCount , threadStatus,numThreads; // get number of available devices CUDA_SAFE_CALL(hipGetDeviceCount(&numOfDevicesAvailable)); numThreads=numOfDevicesAvailable; int count; hA=hB=NUMROWS; wA=wB=NUMCOLS; count =0; // holds counter variables check_cmdline_arg(argc,argv); // function to check command line arguments /* function to allocate Host and Device matrices*/ memoryAlloc(hA,wA, hB,wB); assert(threads = (pthread_t *)malloc(numThreads * sizeof(pthread_t))); // allocate memory for number of threads // call thread function for(threadCount = 0 ; threadCount < numThreads ; threadCount++) { threadStatus = pthread_create(&threads[threadCount], NULL, (void *(*) (void *))threadWork, (void *)(threadCount)); if(threadStatus) { printf("Error in creating the thread and the return status is %d \n",threadStatus); exit(-1); } } // join threads with main thread for(threadCount = 0 ; threadCount < numThreads ; threadCount++) { threadStatus = 
pthread_join(threads[threadCount], NULL); if(threadStatus) { printf("Error in joining the threads and the return status is %d \n",threadStatus); exit(-1); } } hipHostFree(hAddMatMatA); hipHostFree(hAddMatMatB); hipHostFree(hAddMatMatC); return 0; }
51567738f078b355fa2d1b35da195e58619e4dc1.cu
/************************************************************************** C-DAC Tech Workshop : hyPACK-2013 October 15-18, 2013 Example : multipleKernels-multiGPU-streams-matrix-matrix-comp.cu url : http://cdac.in/index.aspx?id=ev_hpc_gpu-comp-nvidia-cuda-streams#top Objective : The objective is to demonstrate use of CUDA Synchronous and CUDA Asynchronous APIs with CUDA streams for simple addition of two nonsquare matrices & compare the execution time on multiGPU system. Matrix-Matrix Addition kernel is domonstrated Input : Number of kernels(optional, default is set to 16) Output : Execution-Type(Syn,Asyn),Execution Time in sec Relative-Error Created : August-2013 E-mail : [email protected] ********************************************************************************/ /* inclusion of header file that contains necessary declarions */ #include <pthread.h> #include <stdio.h> #include <cuda.h> #include <time.h> #include <math.h> #include <assert.h> #define EPS 1.0e-14 /* threshhold aprrox epsilion value */ #define BLOCK_SIZE 8 #define NUMROWS 128 #define NUMCOLS 64 int numOfDevicesAvailable; long int hA, wA, hB, wB ,size; //holds height and width for MatrixA and MatrixB double *hAddMatMatA , *hAddMatMatB, *hAddMatMatC; // holds host matrices int nkernels; // holds total number of concurrent kernels /* function prototypes */ double matMatAddCheckResult (double *hAddMatMatA,double *hAddMatMatB,double *output,long int numRows,long int numCols); void memoryAlloc(long int hA, long int wA,long int hB, long int wB); /* Macro to check for correctness of CUDA API */ #define CUDA_SAFE_CALL(call){\ cudaError_t err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, cudaGetErrorString( err) ); \ exit(-1); \ }}\ /* * Fill in the matrix/vector with double precision values */ void fillInData(double* vec,int size) { int ind; for(ind=0;ind<size;ind++) vec[ind]=drand48() ; } /* *check mem error */ void memError(char *arrayname, char *benchmark, int len, char *type) { printf("\nMemory not sufficient to allocate for array %s\n\tBenchmark : %s \n\tMemory requested = %d number of %s elements\n",arrayname, benchmark, len, type); printf("\n\tAborting\n\n"); exit(-1); } /* *checl grid and block dimensions */ void checkBlockGridDim(cudaDeviceProp devProp,dim3 blockDim,dim3 gridDim) { if( blockDim.x >= devProp.maxThreadsDim[0] || blockDim.y >= devProp.maxThreadsDim[1] || blockDim.z >= devProp.maxThreadsDim[2] ) { printf("\nBlock Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]); exit(-1); } if( gridDim.x >= devProp.maxGridSize[0] || gridDim.y >= devProp.maxGridSize[1] || gridDim.z >= devProp.maxGridSize[2] ) { printf("\nGrid Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]); exit(-1); } } /***************************************** * Matrix Matrix Addition ******************************************/ /* __global__ void kernelMatMatAdd(double *dInMatA, double *dInMatB,double *dInMatC, int matRowColSize, int threadDim) { int tidx = threadIdx.x; int tidy = threadIdx.y; int tindex = (threadDim * tidx) + tidy; // get thread index int maxNumThread = threadDim * threadDim; int pass = 0; int rowCount ; int curColInd ; while( (curColInd = (tindex + maxNumThread * pass)) < matRowColSize ) { for( rowCount = 0; rowCount < matRowColSize; rowCount++) { dInMatC[curColInd * matRowColSize + rowCount] = 
dInMatA[curColInd * matRowColSize + rowCount] + dInMatB[curColInd * matRowColSize + rowCount]; } pass++; // move to next column } __syncthreads(); } end of Mat Mat Add device code */ __global__ void kernelMatMatAdd(double *dInMatA, double *dInMatB,double *dInMatC, long int matRowSize, long int matColSize ,int threadDim) { int tidx = threadIdx.x; int tidy = threadIdx.y; int tindex = (threadDim * tidx) + tidy; // get thread index int maxNumThread = threadDim * threadDim; int pass = 0; int rowCount ; int curColInd ; while( (curColInd = (tindex + maxNumThread * pass)) < matColSize ) { for( rowCount = 0; rowCount < matRowSize; rowCount++) { dInMatC[curColInd * matRowSize + rowCount] = dInMatA[curColInd * matRowSize + rowCount] + dInMatB[curColInd * matRowSize + rowCount]; } pass++; // move to next column } __syncthreads(); }/* end of Mat Mat Add device code */ /*************************************************************** function to implement concurrent kernel execution ***************************************************************/ void funcAsynchConcurrentExec(double *dAddMatMatA, double *dAddMatMatB, double *dAddMatMatC,double *hAddMatMatA, double *hAddMatMatB, double *hAddMatMatC, int nkernels, int NSTREAM, cudaStream_t *stream , long int hA, long int wA, long int hB, long int wB,cudaDeviceProp deviceProp) { float elapsedTime; // holds timing variables cudaError_t err; // holds error value /* create CUDA event handles */ cudaEvent_t startEvent, stopEvent; CUDA_SAFE_CALL( cudaEventCreate(&startEvent)); CUDA_SAFE_CALL( cudaEventCreate(&stopEvent)); /* get all errors before kernel launch */ if ( err=cudaGetLastError()) { printf(" File : %s , Line : %d , Error : %s \n",__FILE__, __LINE__, cudaGetErrorString(err)); } /* define blocks and grids check grid and block dimension*/ dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); //threads per block dim3 dimGrid(1,1); //blocks per grid checkBlockGridDim(deviceProp,dimGrid, dimBlock); /* Asynchronous kernel execution */ cudaEventRecord(startEvent); for( int i=0; i<nkernels; ++i) { /* mem copy from host to device asynchcronously */ CUDA_SAFE_CALL( cudaMemcpyAsync(dAddMatMatA, hAddMatMatA, hA*wA*sizeof(double), cudaMemcpyHostToDevice,stream[i])); CUDA_SAFE_CALL( cudaMemcpyAsync(dAddMatMatB, hAddMatMatB, hB*wB*sizeof(double), cudaMemcpyHostToDevice, stream[i])); CUDA_SAFE_CALL( cudaMemcpyAsync(dAddMatMatC, hAddMatMatC, hA*wB*sizeof(double), cudaMemcpyHostToDevice, stream[i])); } for( int i=0; i<nkernels; ++i) { // queue nkernels and record when they are done //kernelMatMatAdd<<<dimGrid, dimBlock, 0, stream[i]>>>(dAddMatMatA,dAddMatMatB, dAddMatMatC, SIZE,BLOCK_SIZE); kernelMatMatAdd<<<dimGrid, dimBlock, 0, stream[i]>>>(dAddMatMatA,dAddMatMatB, dAddMatMatC, NUMROWS,NUMCOLS,BLOCK_SIZE); } for( int i=0; i<nkernels; ++i) { /* copy output from device to host */ CUDA_SAFE_CALL( cudaMemcpyAsync(hAddMatMatC, dAddMatMatC, hA*wB*sizeof(double), cudaMemcpyDeviceToHost, stream[i])); } CUDA_SAFE_CALL( cudaEventRecord(stopEvent)); CUDA_SAFE_CALL( cudaEventSynchronize(stopEvent)); CUDA_SAFE_CALL( cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent)); /* get all errors from kernel launch */ if ( err=cudaGetLastError()) { printf(" File : %s , Line : %d , Error : %s \n",__FILE__, __LINE__, cudaGetErrorString(err)); } /* calculate measured time and gflops */ double tsecGpu; tsecGpu = (double) (elapsedTime * 1.0e-3); // converting to seconds from milliseconds /* check GPU results against CPU results */ double errorNorm = matMatAddCheckResult 
(hAddMatMatA,hAddMatMatB,hAddMatMatC,hA,wB); /* print output on screen */ printf("%s\t%f\t %e\t\n","Asynchronous Concurrent Execution",tsecGpu,errorNorm); /* relese GPU events */ cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } /************************************************************************ functions to execute multiple kernels without stream ************************************************************************/ void funcSynchExec(double *dAddMatMatA, double *dAddMatMatB, double *dAddMatMatC,double *hAddMatMatA, double *hAddMatMatB, double *hAddMatMatC, int nkernels,long int hA, long int wA, long int hB, long int wB, cudaDeviceProp deviceProp) { float elapsedTime; // holds timing variables cudaError_t err; // holds error value /* create CUDA event handles */ cudaEvent_t startEvent, stopEvent; CUDA_SAFE_CALL( cudaEventCreate(&startEvent)); CUDA_SAFE_CALL( cudaEventCreate(&stopEvent)); /* get all errors before kernel launch */ if ( err=cudaGetLastError()) { printf(" File : %s , Line : %d , Error : %s \n",__FILE__, __LINE__, cudaGetErrorString(err)); } /* define blocks and grids check grid and block dimension*/ dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); //threads per block dim3 dimGrid(1,1); //blocks per grid checkBlockGridDim(deviceProp,dimGrid, dimBlock); /*Synchronous kernel execution */ cudaEventRecord(startEvent, 0); for(int i=0;i<nkernels;i++) { /* mem copy from host to device asynchcronously */ CUDA_SAFE_CALL( cudaMemcpy(dAddMatMatA, hAddMatMatA, hA*wA*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL( cudaMemcpy(dAddMatMatB, hAddMatMatB, hB*wB*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL( cudaMemcpy(dAddMatMatC, hAddMatMatC, hA*wB*sizeof(double), cudaMemcpyHostToDevice)); } for( int i=0; i<nkernels; ++i) { // queue nkernels and record when they are done //kernelMatMatAdd<<<dimGrid, dimBlock>>>(dAddMatMatA,dAddMatMatB, dAddMatMatC, SIZE,BLOCK_SIZE); kernelMatMatAdd<<<dimGrid, dimBlock>>>(dAddMatMatA,dAddMatMatB, dAddMatMatC, hA,wB,BLOCK_SIZE); } for( int i=0; i<nkernels; ++i) { /* copy output from device to host */ CUDA_SAFE_CALL( cudaMemcpy(hAddMatMatC, dAddMatMatC, hA*wB*sizeof(double), cudaMemcpyDeviceToHost)); } /* in this sample we just wait until the GPU is done */ CUDA_SAFE_CALL( cudaEventRecord(stopEvent, 0) ); CUDA_SAFE_CALL( cudaEventSynchronize(stopEvent) ); CUDA_SAFE_CALL( cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent) ); /* get all errors from kernel launch */ if ( err=cudaGetLastError()) { printf(" File : %s , Line : %d , Error : %s \n",__FILE__, __LINE__, cudaGetErrorString(err)); } /* calculate measured time and gflops */ double tsecGpu = (double) (elapsedTime * 1.0e-3); /* check CPU+GPU results against CPU results */ double errorNorm = matMatAddCheckResult (hAddMatMatA,hAddMatMatB,hAddMatMatC,hA,wB); /* print output on the screen */ printf("%s\t\t\t%f\t %e\t\n","Synchronous Execution",tsecGpu,errorNorm); /* release GPU event */ cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } /************************************************************ function to check the result with sequential result ***************************************************************/ double matMatAddCheckResult (double *hAddMatMatA,double *hAddMatMatB,double *outputGPU,long int numRows,long int numCols) { int j, flag=0; //Holds flag value double *outputCPU; //Holds sequential resultant output double errorNorm = 0.0; // HOlds Error norm value double eps=EPS; double relativeError=0.0; // Holds relative error assert((outputCPU = (double 
*)malloc( sizeof(double) * numRows*numCols))!=NULL); /*sequential Matrix Matrix Addition result*/ for( j=0 ; j<numRows*numCols ; j++) { outputCPU[j]= hAddMatMatA[j] + hAddMatMatB[j]; } /* check opencl result with sequential result*/ for( j=0 ; j < numRows*numCols ; j++) { if (fabs(outputCPU[j]) > fabs(outputGPU[j])) relativeError = fabs((outputCPU[j] - outputGPU[j]) / outputCPU[j]); else relativeError = fabs((outputGPU[j] - outputCPU[j]) / outputGPU[j]); if (relativeError > eps) { if(errorNorm < relativeError) { errorNorm = relativeError; flag=1; } } } if( flag == 1) { printf(" \n\t Results verfication : Failed"); printf(" \n\t Considered machine precision : %e", eps); printf(" \n\t Relative Error : %e", errorNorm); } if(flag==0) { } free(outputCPU); return errorNorm; } /* function to check device properties related to aynchronous execution */ void checkDeviceProperty(cudaDeviceProp deviceProp) { //printf("\nDevice Used :\t %s",deviceProp.name); if( (deviceProp.concurrentKernels == 0 )) //check concurrent kernel support { printf("> GPU does not support concurrent kernel execution\n"); printf(" CUDA kernel runs will be serialized\n"); } if(deviceProp.asyncEngineCount == 0) //check concurrent data transfer support { printf("GPU does not support concurrent Data transer and overlaping of kernel execution & data transfer\n"); printf("Mem copy call will be blocking calls\n"); } } /* function to check for device availability */ void checkDeviceAvailability(int id) { cudaError_t err; // holds error value err=cudaSetDevice(id); //change this to set the code to another GPU if (err == cudaErrorDevicesUnavailable) { printf("\ndevice %d Not available\n",id); exit(0); } } /* Function for memory allocation */ void memoryAlloc(long int hA, long int wA, long int hB,long int wB) { int size; /* memory allocate to matrices*/ CUDA_SAFE_CALL( cudaMallocHost((void**)&hAddMatMatA , hA * wA * sizeof(double))); CUDA_SAFE_CALL( cudaMallocHost((void**)&hAddMatMatB , hB * wB * sizeof(double))); CUDA_SAFE_CALL( cudaMallocHost((void**)&hAddMatMatC , hA * wB * sizeof(double))); /* initialize Matrices*/ fillInData(hAddMatMatA,hA*wA); fillInData(hAddMatMatB,hB*wB); for(int index = 0; index < hA*wB ; index++) hAddMatMatC[index] = 0; } /* Function to check command line arguments */ void check_cmdline_arg(int argc,char* argv[]) { switch(argc) { case 1: printf("\n Number of kernels not specified....default value will be taken\n"); nkernels = 16; break; case 2 : nkernels = atoi(argv[1]); // holds total number of concurrent kernels if(nkernels==0) { printf("\nWrong input....\n"); printf("\nUsage : <executable> [nkernels].........aborting \n"); exit(-1); } if(nkernels > 16) { printf("\n The maximum number of kernel launches that a device can execute concurrently is 16 \n"); printf("\n Kernels will may not be executed concurrently...... 
\n"); } break; default : printf("\n Invalid options...\n"); printf("\n Usage : <./exe> [nKernels] \n"); exit(-1); } } /* Thread function definition */ void* threadWork(int threadId) { double *dAddMatMatA, *dAddMatMatB, *dAddMatMatC; // holds device matrices cudaDeviceProp deviceProp; cudaStream_t *stream; // holds stream array int NSTREAM ,count,size ; // holds total number of streams NSTREAM = nkernels; checkDeviceAvailability(threadId); cudaSetDevice(threadId); int device; cudaGetDevice(&device); cudaGetDeviceProperties(&deviceProp,device); /* call function to check device properties */ checkDeviceProperty(deviceProp); // function to check device properties size = hA * wA * sizeof(double); CUDA_SAFE_CALL( cudaMalloc((void**) &dAddMatMatA, size)); /* allocate device memory*/ size = hB * wB * sizeof(double); CUDA_SAFE_CALL( cudaMalloc((void**) &dAddMatMatB,size)); /* allocate device memory*/ size = hA * wB * sizeof(double); CUDA_SAFE_CALL( cudaMalloc((void**) &dAddMatMatC,size)); for(count = 0 ; count < NSTREAM; count++) stream = (cudaStream_t*) malloc(NSTREAM * sizeof(cudaStream_t)); for(count = 0; count< NSTREAM; count++) CUDA_SAFE_CALL( cudaStreamCreate(&(stream[count]))); /* print information on the screen */ printf("\n\tFor device %d : %s\n ",threadId,deviceProp.name); printf("\nNumber of kernels :\t %d", nkernels); printf("\nNOTE : TIME_SEC includes data transfer time from host to device, device to host and kernel time"); printf("\n\nExecution-Type\t\t\t\t Time_sec\t Relative-Error\n"); printf("======================================================================\n"); /* call function to execute Asynchronous kernels execution */ funcAsynchConcurrentExec(dAddMatMatA, dAddMatMatB, dAddMatMatC,hAddMatMatA, hAddMatMatB, hAddMatMatC, nkernels, NSTREAM, stream ,hA,wA,hB,wB,deviceProp); /* call function to execute synchronous kernels execution */ funcSynchExec(dAddMatMatA, dAddMatMatB, dAddMatMatC,hAddMatMatA, hAddMatMatB, hAddMatMatC, nkernels, hA,wA,hB,wB,deviceProp); printf("======================================================================\n"); /*********** Release all resources***************************/ /* destroy an array of stream handles */ for(count = 0; count< NSTREAM; count++) CUDA_SAFE_CALL( cudaStreamDestroy((stream[count]))); cudaFree(dAddMatMatA); cudaFree(dAddMatMatB); cudaFree(dAddMatMatC); return 0; } /***************************************************************************** main function ******************************************************************************/ int main(int argc, char *argv[]) { pthread_t *threads; int threadCount , threadStatus,numThreads; // get number of available devices CUDA_SAFE_CALL(cudaGetDeviceCount(&numOfDevicesAvailable)); numThreads=numOfDevicesAvailable; int count; hA=hB=NUMROWS; wA=wB=NUMCOLS; count =0; // holds counter variables check_cmdline_arg(argc,argv); // function to check command line arguments /* function to allocate Host and Device matrices*/ memoryAlloc(hA,wA, hB,wB); assert(threads = (pthread_t *)malloc(numThreads * sizeof(pthread_t))); // allocate memory for number of threads // call thread function for(threadCount = 0 ; threadCount < numThreads ; threadCount++) { threadStatus = pthread_create(&threads[threadCount], NULL, (void *(*) (void *))threadWork, (void *)(threadCount)); if(threadStatus) { printf("Error in creating the thread and the return status is %d \n",threadStatus); exit(-1); } } // join threads with main thread for(threadCount = 0 ; threadCount < numThreads ; threadCount++) { threadStatus = 
pthread_join(threads[threadCount], NULL); if(threadStatus) { printf("Error in joining the threads and the return status is %d \n",threadStatus); exit(-1); } } cudaFreeHost(hAddMatMatA); cudaFreeHost(hAddMatMatB); cudaFreeHost(hAddMatMatC); return 0; }
699611d13dc5fcc804916947fb398bcd43ba1106.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>

/**
 *
 * PARAMETERS
 *
 */

#define VERBOSE
#define PROFILE
//#define DRY_RUN

#define INPUT_FILE "input.ppm"
#define OUTPUT_FILE "output.ppm"

#define FILTER_WIDTH 3
#define FILTER_HEIGHT 3
#define FILTER { \
    { 0.05, 0.1, 0.05 }, \
    { 0.1, 0.4, 0.1 }, \
    { 0.05, 0.1, 0.05 }, \
}

//#define CONSTANT_FILTER/*
#define GLOBAL_FILTER/*
#define LOCAL_FILTER/**/

#define GRID_DIM 32,32
#define BLOCK_DIM 32,32
#define SHARED_DIM 0

/**
 *
 * METADATA
 *
 */

#define CREATOR "Jae"

/**
 *
 * CUDA UTILS
 *
 */

#define cuda_try( ans ) { __cuda_try((ans), __FILE__, __LINE__); }
inline void __cuda_try( hipError_t code, const char * file, int line, bool abort=true ) {
    if (code != hipSuccess) {
        fprintf(stderr, "CUDA THROW %s CAUGHT AT %s LINE %d\n", hipGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/**
 *
 * UTILS
 *
 */

#define RGB_COMPONENT_COLOR 255

#define STR_EXPAND(...) #__VA_ARGS__
#define ARG(...) STR_EXPAND(__VA_ARGS__)

#define x_radius (FILTER_WIDTH / 2)
#define y_radius (FILTER_HEIGHT / 2)

#define split( n, among ) { ((n + (among - 1)) / among) }

#if defined(CONSTANT_FILTER) && !defined(GLOBAL_FILTER) && !defined(LOCAL_FILTER)
__constant__ float filter[FILTER_HEIGHT][FILTER_WIDTH] = FILTER;
#elif defined(GLOBAL_FILTER) && !defined(CONSTANT_FILTER) && !defined(LOCAL_FILTER)
__device__ float filter[FILTER_HEIGHT][FILTER_WIDTH] = FILTER;
#endif

typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

typedef struct {
    int x, y;
    PPMPixel * data;
} PPMImage;

static PPMImage * readPPM( const char * filename ) {
    char buff[16];
    PPMImage * img;
    FILE * fp;
    int c, rgb_comp_color;

    //open PPM file for reading
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }

    //read image format
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }

    //check the image format
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }

    //alloc memory form image
    img = (PPMImage *)malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }

    //check for comments
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n') ;
        c = getc(fp);
    }
    ungetc(c, fp);

    //read image size information
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }

    //read rgb component
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }

    //check rgb component depth
    if (rgb_comp_color!= RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }

    while (fgetc(fp) != '\n') ;

    //memory allocation for pixel data
    img->data = (PPMPixel *)malloc(img->x * img->y * sizeof(PPMPixel));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }

    //read pixel data from file
    if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }

    fclose(fp);
    return img;
}

void writePPM( const char * filename, PPMImage * img ) {
    FILE * fp;
    //open file for output
    fp = fopen(filename, "wb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }

    //write the header file
    //image format
    fprintf(fp, "P6\n");

    //comments
    fprintf(fp, "# Created by %s\n",CREATOR);

    //image size
    fprintf(fp, "%d %d\n",img->x,img->y);

    // rgb component depth
    fprintf(fp, "%d\n",RGB_COMPONENT_COLOR);

    // pixel data
    fwrite(img->data, 3 * img->x, img->y, fp);
    fclose(fp);
}

__global__ void blur_kernel( PPMImage * img, PPMImage * out ) {
#if defined(LOCAL_FILTER) && !defined(CONSTANT_FILTER) && !defined(GLOBAL_FILTER)
    float filter[FILTER_HEIGHT][FILTER_WIDTH] = FILTER;
#endif

    int img_x = img->x;
    int img_y = img->y;

    int x_per_block = split(img_x, gridDim.x);
    int y_per_block = split(img_y, gridDim.y);

    int min_x_of_block = x_per_block * blockIdx.x;
    int min_y_of_block = y_per_block * blockIdx.y;

    int max_x_of_block = min_x_of_block + x_per_block - 1;
    int max_y_of_block = min_y_of_block + y_per_block - 1;
    if (max_x_of_block > img_x - 1) max_x_of_block = img_x - 1;
    if (max_y_of_block > img_y - 1) max_y_of_block = img_y - 1;

    int work_per_block = (x_per_block * y_per_block);
    int threads_per_block = (blockDim.x * blockDim.y);
    int work_per_thread = split(work_per_block, threads_per_block);

    int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
    int thread_work_offset = work_per_thread * thread_id;

#if SHARED_DIM
    int x_sources_per_block = (x_per_block + 2 * x_radius);
    int y_sources_per_block = (y_per_block + 2 * y_radius);
    int sources_per_block = x_sources_per_block * y_sources_per_block;
    int sources_per_thread = split(sources_per_block, threads_per_block);
    int thread_source_offset = sources_per_thread * thread_id;

    __shared__ PPMPixel block_source[SHARED_DIM];

    for (int i = 0; i < sources_per_thread && (thread_source_offset + i) < sources_per_block; i ++) {
        int x_from_block_sources = (thread_source_offset + i) % x_sources_per_block;
        int y_from_block_sources = (thread_source_offset + i) / x_sources_per_block;
        int x_from_block = x_from_block_sources - x_radius;
        int y_from_block = y_from_block_sources - y_radius;
        int x = x_from_block + min_x_of_block;
        int y = y_from_block + min_y_of_block;
        int source_index = y_from_block_sources * x_sources_per_block + x_from_block_sources;
        if (source_index < SHARED_DIM)
            block_source[source_index] = (x >= 0 && x < img_x && y >= 0 && y < img_y) ?
                img->data[y * img_x + x] :
                (PPMPixel) { 0, 0, 0 };
    }
    __syncthreads();
#endif

    out->x = img_x;
    out->y = img_y;

    for (int i = 0; i < work_per_thread && thread_work_offset + i < work_per_block; i ++) {
        int x_from_block = (thread_work_offset + i) % x_per_block;
        int y_from_block = (thread_work_offset + i) / x_per_block;
        int x = x_from_block + min_x_of_block;
        int y = y_from_block + min_y_of_block;
        if (x <= max_x_of_block && y <= max_y_of_block) {
            float r = 0;
            float g = 0;
            float b = 0;
            for (int x_from_point = - x_radius; x_from_point <= + x_radius; x_from_point ++) {
                for (int y_from_point = - y_radius; y_from_point <= + y_radius; y_from_point ++) {
                    int filter_x = x_radius + x_from_point;
                    int filter_y = y_radius + y_from_point;
#if SHARED_DIM
                    int source_index = (y_from_block + filter_y) * x_sources_per_block + (x_from_block + filter_x);
#endif
                    PPMPixel filter_point =
#if SHARED_DIM
                        source_index < SHARED_DIM ?
                            block_source[source_index] :
#endif
                        (x + x_from_point) >= 0 && (x + x_from_point) < img_x && (y + y_from_point) >= 0 && (y + y_from_point) < img_y ?
                            img->data[(y + y_from_point) * img_x + (x + x_from_point)] :
                            (PPMPixel) { 0, 0, 0 };
                    float filter_weight = filter[filter_x][filter_y];
                    r += filter_weight * filter_point.red;
                    g += filter_weight * filter_point.green;
                    b += filter_weight * filter_point.blue;
                }
            }
            out->data[y * img_x + x] = (PPMPixel) { r, g, b };
        }
    }
}

dim3 grid_dim(GRID_DIM);
dim3 block_dim(BLOCK_DIM);

void gaussian_blur( PPMImage * img ) {
    int n = img->x * img->y;

#ifdef VERBOSE
    printf("blurring...\n");
    int img_x = img->x;
    int img_y = img->y;
    int x_per_block = split(img_x, grid_dim.x);
    int y_per_block = split(img_y, grid_dim.y);
    int x_sources_per_block = (x_per_block + 2 * x_radius);
    int y_sources_per_block = (y_per_block + 2 * y_radius);
    int sources_per_block = x_sources_per_block * y_sources_per_block;
    int work_per_block = (x_per_block * y_per_block);
    int threads_per_block = (block_dim.x * block_dim.y);
    int sources_per_thread = split(sources_per_block, threads_per_block);
    int work_per_thread = split(work_per_block, threads_per_block);
    printf("problem size %d*%d=%d, shared memory "ARG(SHARED_DIM)", filter size "ARG(FILTER_WIDTH)"*"ARG(FILTER_HEIGHT)"=%d, grid dim "ARG(GRID_DIM)"=%d, block size "ARG(BLOCK_DIM)"=%d - %d,%d - %d->%d, thread size %d->%d\n"
        , img_x, img_y, n, FILTER_WIDTH * FILTER_HEIGHT, grid_dim.x * grid_dim.y * grid_dim.z, threads_per_block
        , x_per_block, y_per_block, sources_per_block, work_per_block, sources_per_thread, work_per_thread);
#endif

    PPMImage * dev_in;
    PPMImage * dev_out;
    PPMImage * host_temp = (PPMImage *) malloc(sizeof(PPMImage));
    * host_temp = (PPMImage) { .x = img->x, .y = img->y };

    cuda_try(hipMalloc((void **)&(host_temp->data), n * sizeof(PPMPixel)));
    cuda_try(hipMemcpy(host_temp->data, img->data, n * sizeof(PPMPixel), hipMemcpyHostToDevice));
    cuda_try(hipMalloc((void **)&dev_in, sizeof(PPMImage)));
    cuda_try(hipMemcpy(dev_in, host_temp, sizeof(PPMImage), hipMemcpyHostToDevice));

    cuda_try(hipMalloc((void **)&(host_temp->data), n * sizeof(PPMPixel)));
    cuda_try(hipMalloc((void **)&dev_out, sizeof(PPMImage)));
    cuda_try(hipMemcpy(dev_out, host_temp, sizeof(PPMImage), hipMemcpyHostToDevice));

#ifdef PROFILE
    float time;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
#endif

    hipLaunchKernelGGL(( blur_kernel), dim3(grid_dim),dim3(block_dim), 0, 0, dev_in, dev_out);

#ifdef PROFILE
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop);
    printf("%f ms\n", time);
#endif

    cuda_try(hipPeekAtLastError());

    cuda_try(hipMemcpy(host_temp, dev_out, sizeof(PPMImage), hipMemcpyDeviceToHost));
    cuda_try(hipMemcpy(img->data, host_temp->data, n * sizeof(PPMPixel), hipMemcpyDeviceToHost));
    cuda_try(hipFree(host_temp->data));
    cuda_try(hipFree(dev_out));

    cuda_try(hipMemcpy(host_temp, dev_in, sizeof(PPMImage), hipMemcpyDeviceToHost));
    cuda_try(hipFree(host_temp->data));
    cuda_try(hipFree(dev_in));

#ifdef VERBOSE
    printf("blurring complete\n");
#endif
}

int main( void ) {
    PPMImage * image = readPPM(INPUT_FILE);

    gaussian_blur(image);

#if !defined(DRY_RUN)
    writePPM(OUTPUT_FILE, image);
#endif
}
699611d13dc5fcc804916947fb398bcd43ba1106.cu
#include<stdio.h>
#include<stdlib.h>

/**
 *
 * PARAMETERS
 *
 */

#define VERBOSE
#define PROFILE
//#define DRY_RUN

#define INPUT_FILE "input.ppm"
#define OUTPUT_FILE "output.ppm"

#define FILTER_WIDTH 3
#define FILTER_HEIGHT 3
#define FILTER { \
    { 0.05, 0.1, 0.05 }, \
    { 0.1, 0.4, 0.1 }, \
    { 0.05, 0.1, 0.05 }, \
}

//#define CONSTANT_FILTER/*
#define GLOBAL_FILTER/*
#define LOCAL_FILTER/**/

#define GRID_DIM 32,32
#define BLOCK_DIM 32,32
#define SHARED_DIM 0

/**
 *
 * METADATA
 *
 */

#define CREATOR "Jae"

/**
 *
 * CUDA UTILS
 *
 */

#define cuda_try( ans ) { __cuda_try((ans), __FILE__, __LINE__); }
inline void __cuda_try( cudaError_t code, const char * file, int line, bool abort=true ) {
    if (code != cudaSuccess) {
        fprintf(stderr, "CUDA THROW %s CAUGHT AT %s LINE %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/**
 *
 * UTILS
 *
 */

#define RGB_COMPONENT_COLOR 255

#define STR_EXPAND(...) #__VA_ARGS__
#define ARG(...) STR_EXPAND(__VA_ARGS__)

#define x_radius (FILTER_WIDTH / 2)
#define y_radius (FILTER_HEIGHT / 2)

#define split( n, among ) { ((n + (among - 1)) / among) }

#if defined(CONSTANT_FILTER) && !defined(GLOBAL_FILTER) && !defined(LOCAL_FILTER)
__constant__ float filter[FILTER_HEIGHT][FILTER_WIDTH] = FILTER;
#elif defined(GLOBAL_FILTER) && !defined(CONSTANT_FILTER) && !defined(LOCAL_FILTER)
__device__ float filter[FILTER_HEIGHT][FILTER_WIDTH] = FILTER;
#endif

typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

typedef struct {
    int x, y;
    PPMPixel * data;
} PPMImage;

static PPMImage * readPPM( const char * filename ) {
    char buff[16];
    PPMImage * img;
    FILE * fp;
    int c, rgb_comp_color;

    //open PPM file for reading
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }

    //read image format
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }

    //check the image format
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }

    //alloc memory form image
    img = (PPMImage *)malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }

    //check for comments
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n') ;
        c = getc(fp);
    }
    ungetc(c, fp);

    //read image size information
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }

    //read rgb component
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }

    //check rgb component depth
    if (rgb_comp_color!= RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }

    while (fgetc(fp) != '\n') ;

    //memory allocation for pixel data
    img->data = (PPMPixel *)malloc(img->x * img->y * sizeof(PPMPixel));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }

    //read pixel data from file
    if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }

    fclose(fp);
    return img;
}

void writePPM( const char * filename, PPMImage * img ) {
    FILE * fp;
    //open file for output
    fp = fopen(filename, "wb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }

    //write the header file
    //image format
    fprintf(fp, "P6\n");

    //comments
    fprintf(fp, "# Created by %s\n",CREATOR);

    //image size
    fprintf(fp, "%d %d\n",img->x,img->y);

    // rgb component depth
    fprintf(fp, "%d\n",RGB_COMPONENT_COLOR);

    // pixel data
    fwrite(img->data, 3 * img->x, img->y, fp);
    fclose(fp);
}

__global__ void blur_kernel( PPMImage * img, PPMImage * out ) {
#if defined(LOCAL_FILTER) && !defined(CONSTANT_FILTER) && !defined(GLOBAL_FILTER)
    float filter[FILTER_HEIGHT][FILTER_WIDTH] = FILTER;
#endif

    int img_x = img->x;
    int img_y = img->y;

    int x_per_block = split(img_x, gridDim.x);
    int y_per_block = split(img_y, gridDim.y);

    int min_x_of_block = x_per_block * blockIdx.x;
    int min_y_of_block = y_per_block * blockIdx.y;

    int max_x_of_block = min_x_of_block + x_per_block - 1;
    int max_y_of_block = min_y_of_block + y_per_block - 1;
    if (max_x_of_block > img_x - 1) max_x_of_block = img_x - 1;
    if (max_y_of_block > img_y - 1) max_y_of_block = img_y - 1;

    int work_per_block = (x_per_block * y_per_block);
    int threads_per_block = (blockDim.x * blockDim.y);
    int work_per_thread = split(work_per_block, threads_per_block);

    int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
    int thread_work_offset = work_per_thread * thread_id;

#if SHARED_DIM
    int x_sources_per_block = (x_per_block + 2 * x_radius);
    int y_sources_per_block = (y_per_block + 2 * y_radius);
    int sources_per_block = x_sources_per_block * y_sources_per_block;
    int sources_per_thread = split(sources_per_block, threads_per_block);
    int thread_source_offset = sources_per_thread * thread_id;

    __shared__ PPMPixel block_source[SHARED_DIM];

    for (int i = 0; i < sources_per_thread && (thread_source_offset + i) < sources_per_block; i ++) {
        int x_from_block_sources = (thread_source_offset + i) % x_sources_per_block;
        int y_from_block_sources = (thread_source_offset + i) / x_sources_per_block;
        int x_from_block = x_from_block_sources - x_radius;
        int y_from_block = y_from_block_sources - y_radius;
        int x = x_from_block + min_x_of_block;
        int y = y_from_block + min_y_of_block;
        int source_index = y_from_block_sources * x_sources_per_block + x_from_block_sources;
        if (source_index < SHARED_DIM)
            block_source[source_index] = (x >= 0 && x < img_x && y >= 0 && y < img_y) ?
                img->data[y * img_x + x] :
                (PPMPixel) { 0, 0, 0 };
    }
    __syncthreads();
#endif

    out->x = img_x;
    out->y = img_y;

    for (int i = 0; i < work_per_thread && thread_work_offset + i < work_per_block; i ++) {
        int x_from_block = (thread_work_offset + i) % x_per_block;
        int y_from_block = (thread_work_offset + i) / x_per_block;
        int x = x_from_block + min_x_of_block;
        int y = y_from_block + min_y_of_block;
        if (x <= max_x_of_block && y <= max_y_of_block) {
            float r = 0;
            float g = 0;
            float b = 0;
            for (int x_from_point = - x_radius; x_from_point <= + x_radius; x_from_point ++) {
                for (int y_from_point = - y_radius; y_from_point <= + y_radius; y_from_point ++) {
                    int filter_x = x_radius + x_from_point;
                    int filter_y = y_radius + y_from_point;
#if SHARED_DIM
                    int source_index = (y_from_block + filter_y) * x_sources_per_block + (x_from_block + filter_x);
#endif
                    PPMPixel filter_point =
#if SHARED_DIM
                        source_index < SHARED_DIM ?
                            block_source[source_index] :
#endif
                        (x + x_from_point) >= 0 && (x + x_from_point) < img_x && (y + y_from_point) >= 0 && (y + y_from_point) < img_y ?
                            img->data[(y + y_from_point) * img_x + (x + x_from_point)] :
                            (PPMPixel) { 0, 0, 0 };
                    float filter_weight = filter[filter_x][filter_y];
                    r += filter_weight * filter_point.red;
                    g += filter_weight * filter_point.green;
                    b += filter_weight * filter_point.blue;
                }
            }
            out->data[y * img_x + x] = (PPMPixel) { r, g, b };
        }
    }
}

dim3 grid_dim(GRID_DIM);
dim3 block_dim(BLOCK_DIM);

void gaussian_blur( PPMImage * img ) {
    int n = img->x * img->y;

#ifdef VERBOSE
    printf("blurring...\n");
    int img_x = img->x;
    int img_y = img->y;
    int x_per_block = split(img_x, grid_dim.x);
    int y_per_block = split(img_y, grid_dim.y);
    int x_sources_per_block = (x_per_block + 2 * x_radius);
    int y_sources_per_block = (y_per_block + 2 * y_radius);
    int sources_per_block = x_sources_per_block * y_sources_per_block;
    int work_per_block = (x_per_block * y_per_block);
    int threads_per_block = (block_dim.x * block_dim.y);
    int sources_per_thread = split(sources_per_block, threads_per_block);
    int work_per_thread = split(work_per_block, threads_per_block);
    printf("problem size %d*%d=%d, shared memory "ARG(SHARED_DIM)", filter size "ARG(FILTER_WIDTH)"*"ARG(FILTER_HEIGHT)"=%d, grid dim "ARG(GRID_DIM)"=%d, block size "ARG(BLOCK_DIM)"=%d - %d,%d - %d->%d, thread size %d->%d\n"
        , img_x, img_y, n, FILTER_WIDTH * FILTER_HEIGHT, grid_dim.x * grid_dim.y * grid_dim.z, threads_per_block
        , x_per_block, y_per_block, sources_per_block, work_per_block, sources_per_thread, work_per_thread);
#endif

    PPMImage * dev_in;
    PPMImage * dev_out;
    PPMImage * host_temp = (PPMImage *) malloc(sizeof(PPMImage));
    * host_temp = (PPMImage) { .x = img->x, .y = img->y };

    cuda_try(cudaMalloc((void **)&(host_temp->data), n * sizeof(PPMPixel)));
    cuda_try(cudaMemcpy(host_temp->data, img->data, n * sizeof(PPMPixel), cudaMemcpyHostToDevice));
    cuda_try(cudaMalloc((void **)&dev_in, sizeof(PPMImage)));
    cuda_try(cudaMemcpy(dev_in, host_temp, sizeof(PPMImage), cudaMemcpyHostToDevice));

    cuda_try(cudaMalloc((void **)&(host_temp->data), n * sizeof(PPMPixel)));
    cuda_try(cudaMalloc((void **)&dev_out, sizeof(PPMImage)));
    cuda_try(cudaMemcpy(dev_out, host_temp, sizeof(PPMImage), cudaMemcpyHostToDevice));

#ifdef PROFILE
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
#endif

    blur_kernel<<<grid_dim,block_dim>>>(dev_in, dev_out);

#ifdef PROFILE
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("%f ms\n", time);
#endif

    cuda_try(cudaPeekAtLastError());

    cuda_try(cudaMemcpy(host_temp, dev_out, sizeof(PPMImage), cudaMemcpyDeviceToHost));
    cuda_try(cudaMemcpy(img->data, host_temp->data, n * sizeof(PPMPixel), cudaMemcpyDeviceToHost));
    cuda_try(cudaFree(host_temp->data));
    cuda_try(cudaFree(dev_out));

    cuda_try(cudaMemcpy(host_temp, dev_in, sizeof(PPMImage), cudaMemcpyDeviceToHost));
    cuda_try(cudaFree(host_temp->data));
    cuda_try(cudaFree(dev_in));

#ifdef VERBOSE
    printf("blurring complete\n");
#endif
}

int main( void ) {
    PPMImage * image = readPPM(INPUT_FILE);

    gaussian_blur(image);

#if !defined(DRY_RUN)
    writePPM(OUTPUT_FILE, image);
#endif
}
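The pair above isolates the two mechanical rewrites hipify applies to this program: the triple-chevron launch blur_kernel<<<grid_dim,block_dim>>>(dev_in, dev_out) becomes hipLaunchKernelGGL(( blur_kernel), dim3(grid_dim),dim3(block_dim), 0, 0, dev_in, dev_out), and every cuda* type and runtime call is renamed to its hip* counterpart while the kernel body and host logic are left untouched. Below is a minimal HIP-side sketch of that mapping; the scale kernel and its launch configuration are hypothetical illustrations, not part of either file above.

// Minimal sketch (hypothetical "scale" kernel, not taken from the files above) of the
// CUDA -> HIP renames that hipify performs on kernel launches and runtime calls.
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void scale( float * v, float s, int n ) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // thread built-ins are identical in CUDA and HIP
    if (i < n) v[i] *= s;
}

int main( void ) {
    const int n = 1 << 10;
    float * d_v;
    hipMalloc((void **)&d_v, n * sizeof(float));     // was: cudaMalloc
    hipMemset(d_v, 0, n * sizeof(float));            // was: cudaMemset
    // was: scale<<<dim3(4), dim3(256)>>>(d_v, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3(4), dim3(256), 0, 0, d_v, 2.0f, n);
    hipDeviceSynchronize();                          // was: cudaDeviceSynchronize
    hipFree(d_v);                                    // was: cudaFree
    printf("done\n");
    return 0;
}

Note that hipcc also accepts the triple-chevron syntax directly; the hipLaunchKernelGGL form shown here is simply what the converter emits.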
7cfabe4b16f836ea749043d424caedcda886327d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "fast_pcl/ndt_gpu/NormalDistributionsTransform.h" #include "fast_pcl/ndt_gpu/debug.h" #include <cmath> #include <iostream> #include <pcl/common/transforms.h> namespace gpu { GNormalDistributionsTransform::GNormalDistributionsTransform() { //GRegistration::GRegistration(); gauss_d1_ = gauss_d2_ = 0; outlier_ratio_ = 0.55; step_size_ = 0.1; resolution_ = 1.0f; trans_probability_ = 0; double gauss_c1, gauss_c2, gauss_d3; // Initializes the guassian fitting parameters (eq. 6.8) [Magnusson 2009] gauss_c1 = 10.0 * (1 - outlier_ratio_); gauss_c2 = outlier_ratio_ / pow (resolution_, 3); gauss_d3 = -log (gauss_c2); gauss_d1_ = -log ( gauss_c1 + gauss_c2 ) - gauss_d3; gauss_d2_ = -2 * log ((-log ( gauss_c1 * exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_); transformation_epsilon_ = 0.1; max_iterations_ = 35; j_ang_ = MatrixHost(24, 1); h_ang_ = MatrixHost(45, 1); dj_ang_ = MatrixDevice(24, 1); dh_ang_ = MatrixDevice(45, 1); real_iterations_ = 0; } GNormalDistributionsTransform::GNormalDistributionsTransform(const GNormalDistributionsTransform &other) { gauss_d1_ = other.gauss_d1_; gauss_d2_ = other.gauss_d2_; outlier_ratio_ = other.outlier_ratio_; j_ang_ = other.j_ang_; h_ang_ = other.h_ang_; dj_ang_ = other.dj_ang_; dh_ang_ = other.dh_ang_; step_size_ = other.step_size_; resolution_ = other.resolution_; trans_probability_ = other.trans_probability_; real_iterations_ = other.real_iterations_; voxel_grid_ = other.voxel_grid_; } GNormalDistributionsTransform::~GNormalDistributionsTransform() { dj_ang_.memFree(); dh_ang_.memFree(); } void GNormalDistributionsTransform::setStepSize(double step_size) { step_size_ = step_size; } void GNormalDistributionsTransform::setResolution(float resolution) { resolution_ = resolution; } void GNormalDistributionsTransform::setOutlierRatio(double olr) { outlier_ratio_ = olr; } double GNormalDistributionsTransform::getStepSize() const { return step_size_; } float GNormalDistributionsTransform::getResolution() const { return resolution_; } double GNormalDistributionsTransform::getOutlierRatio() const { return outlier_ratio_; } double GNormalDistributionsTransform::getTransformationProbability() const { return trans_probability_; } int GNormalDistributionsTransform::getRealIterations() { return real_iterations_; } double GNormalDistributionsTransform::auxilaryFunction_PsiMT(double a, double f_a, double f_0, double g_0, double mu) { return (f_a - f_0 - mu * g_0 * a); } double GNormalDistributionsTransform::auxilaryFunction_dPsiMT(double g_a, double g_0, double mu) { return (g_a - mu * g_0); } void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZI>::Ptr input) { // Copy input map data from the host memory to the GPU memory GRegistration::setInputTarget(input); // Build the voxel grid if (target_points_number_ != 0) { voxel_grid_.setLeafSize(resolution_, resolution_, resolution_); voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_); } } void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZ>::Ptr input) { // Copy input map data from the host memory to the GPU memory GRegistration::setInputTarget(input); // Build the voxel grid if (target_points_number_ != 0) { voxel_grid_.setLeafSize(resolution_, resolution_, resolution_); voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_); } } void GNormalDistributionsTransform::computeTransformation(const Eigen::Matrix<float, 4, 4> &guess) 
{ if (dj_ang_.isEmpty()) { dj_ang_.memAlloc(); } if (dh_ang_.isEmpty()) { dh_ang_.memAlloc(); } nr_iterations_ = 0; converged_ = false; double gauss_c1, gauss_c2, gauss_d3; gauss_c1 = 10 * ( 1 - outlier_ratio_); gauss_c2 = outlier_ratio_ / pow(resolution_, 3); gauss_d3 = - log(gauss_c2); gauss_d1_ = -log(gauss_c1 + gauss_c2) - gauss_d3; gauss_d2_ = -2 * log((-log(gauss_c1 * exp(-0.5) + gauss_c2) - gauss_d3) / gauss_d1_); if (guess != Eigen::Matrix4f::Identity()) { final_transformation_ = guess; transformPointCloud(x_, y_, z_, trans_x_, trans_y_, trans_z_, points_number_, guess); } Eigen::Transform<float, 3, Eigen::Affine, Eigen::ColMajor> eig_transformation; eig_transformation.matrix() = final_transformation_; Eigen::Matrix<double, 6, 1> p, delta_p, score_gradient; Eigen::Vector3f init_translation = eig_transformation.translation(); Eigen::Vector3f init_rotation = eig_transformation.rotation().eulerAngles(0, 1, 2); p << init_translation(0), init_translation(1), init_translation(2), init_rotation(0), init_rotation(1), init_rotation(2); Eigen::Matrix<double, 6, 6> hessian; double score = 0; double delta_p_norm; score = computeDerivatives(score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_, p); int loop_time = 0; while (!converged_) { previous_transformation_ = transformation_; Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6>> sv(hessian, Eigen::ComputeFullU | Eigen::ComputeFullV); delta_p = sv.solve(-score_gradient); delta_p_norm = delta_p.norm(); if (delta_p_norm == 0 || delta_p_norm != delta_p_norm) { trans_probability_ = score / static_cast<double>(points_number_); converged_ = delta_p_norm == delta_p_norm; return; } delta_p.normalize(); delta_p_norm = computeStepLengthMT(p, delta_p, delta_p_norm, step_size_, transformation_epsilon_ / 2, score, score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_); delta_p *= delta_p_norm; Eigen::Translation<float, 3> translation(static_cast<float>(delta_p(0)), static_cast<float>(delta_p(1)), static_cast<float>(delta_p(2))); Eigen::AngleAxis<float> tmp1(static_cast<float>(delta_p(3)), Eigen::Vector3f::UnitX()); Eigen::AngleAxis<float> tmp2(static_cast<float>(delta_p(4)), Eigen::Vector3f::UnitY()); Eigen::AngleAxis<float> tmp3(static_cast<float>(delta_p(5)), Eigen::Vector3f::UnitZ()); Eigen::AngleAxis<float> tmp4(tmp1 * tmp2 * tmp3); transformation_ = (translation * tmp4).matrix(); p = p + delta_p; //Not update visualizer if (nr_iterations_ > max_iterations_ || (nr_iterations_ && (::fabs(delta_p_norm) < transformation_epsilon_))) converged_ = true; nr_iterations_++; loop_time++; } trans_probability_ = score / static_cast<double>(points_number_); } /* First step of computing point gradients */ __global__ void computePointGradients0(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dj_ang, double *pg00, double *pg11, double *pg22, double *pg13, double *pg23, double *pg04, double *pg14) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double j_ang[12]; if (threadIdx.x < 12) { j_ang[threadIdx.x] = dj_ang[threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); //Set the 3x3 block start from (0, 0) to identity matrix pg00[i] = 1; pg11[i] = 1; pg22[i] = 1; //Compute point derivatives pg13[i] = o_x * j_ang[0] + o_y * j_ang[1] + o_z * 
j_ang[2]; pg23[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5]; pg04[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8]; pg14[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11]; } } /* Second step of computing point gradients */ __global__ void computePointGradients1(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dj_ang, double *pg24, double *pg05, double *pg15, double *pg25) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double j_ang[12]; if (threadIdx.x < 12) { j_ang[threadIdx.x] = dj_ang[threadIdx.x + 12]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); //Compute point derivatives pg24[i] = o_x * j_ang[0] + o_y * j_ang[1] + o_z * j_ang[2]; pg05[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5]; pg15[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8]; pg25[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11]; } } /* First step of computing point hessians */ __global__ void computePointHessian0(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph93, double *ph103, double *ph113, double *ph123, double *ph94, double *ph133, double *ph104, double *ph143, double *ph114, double *ph153, double *ph95, double *ph163, double *ph105, double *ph173, double *ph115) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[18]; if (threadIdx.x < 18) { h_ang[threadIdx.x] = dh_ang[threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph93[i] = 0; ph103[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph113[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph123[i] = ph94[i] = 0; ph133[i] = ph104[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; ph143[i] = ph114[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11]; ph153[i] = ph95[i] = 0; ph163[i] = ph105[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14]; ph173[i] = ph115[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17]; } } __global__ void computePointHessian1(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph124, double *ph134, double *ph144, double *ph154, double *ph125, double *ph164, double *ph135, double *ph174, double *ph145) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[18]; if (threadIdx.x < 18) { h_ang[threadIdx.x] = dh_ang[18 + threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph124[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph134[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph144[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; ph154[i] = ph125[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11]; ph164[i] = ph135[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14]; ph174[i] = ph145[i] = o_x * h_ang[15] + 
o_y * h_ang[16] + o_z * h_ang[17]; } } __global__ void computePointHessian2(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph155, double *ph165, double *ph175) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[9]; if (threadIdx.x < 9) { h_ang[threadIdx.x] = dh_ang[36 + threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph155[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph165[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph175[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; } } /* compute score_inc list for input points. * The final score_inc is calculated by a reduction sum * on this score_inc list. */ __global__ void computeScoreList(int *starting_voxel_id, int *voxel_id, int valid_points_num, double *e_x_cov_x, double gauss_d1, double *score) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_points_num; i += stride) { double score_inc = 0; for (int vid = starting_voxel_id[i]; vid < starting_voxel_id[i + 1]; vid++) { double tmp_ex = e_x_cov_x[vid]; score_inc += (tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex) ? 0 : -gauss_d1 * tmp_ex; } score[i] = score_inc; } } /* First step to compute score gradient list for input points */ __global__ void computeScoreGradientList(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, int voxel_num, double *e_x_cov_x, double *cov_dxd_pi, double gauss_d1, int valid_voxel_num, double *score_gradients) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int col = blockIdx.y; if (col < 6) { double *sg = score_gradients + col * valid_points_num; double *cov_dxd_pi_mat0 = cov_dxd_pi + col * valid_voxel_num; double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num; double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double tmp_sg = 0.0; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { tmp_ex *= gauss_d1; tmp_sg += ((d_x - centroid_x[vid]) * cov_dxd_pi_mat0[j] + (d_y - centroid_y[vid]) * cov_dxd_pi_mat1[j] + (d_z - centroid_z[vid]) * cov_dxd_pi_mat2[j]) * tmp_ex; } } sg[i] = tmp_sg; } } } /* Intermediate step to compute e_x_cov_x */ __global__ void computeExCovX(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centr_x, double *centr_y, double *centr_z, double gauss_d1, double gauss_d2, double *e_x_cov_x, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = 
static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double t_x, t_y, t_z; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; t_x = d_x - centr_x[vid]; t_y = d_y - centr_y[vid]; t_z = d_z - centr_z[vid]; e_x_cov_x[j] = exp(-gauss_d2 * ((t_x * icov00[vid] + t_y * icov01[vid] + t_z * icov02[vid]) * t_x + ((t_x * icov10[vid] + t_y * icov11[vid] + t_z * icov12[vid]) * t_y) + ((t_x * icov20[vid] + t_y * icov21[vid] + t_z * icov22[vid]) * t_z)) / 2.0); } } } /* update e_x_cov_x - Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009] */ __global__ void updateExCovX(double *e_x_cov_x, double gauss_d2, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_voxel_num; i += stride) { e_x_cov_x[i] *= gauss_d2; } } /* compute cov_dxd_pi as reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]*/ __global__ void computeCovDxdPi(int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *inverse_covariance, int voxel_num, double gauss_d1, double gauss_d2, double *point_gradients, double *cov_dxd_pi, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 3 && col < 6) { double *icov0 = inverse_covariance + row * 3 * voxel_num; double *icov1 = icov0 + voxel_num; double *icov2 = icov1 + voxel_num; double *cov_dxd_pi_tmp = cov_dxd_pi + (row * 6 + col) * valid_voxel_num; double *pg_tmp0 = point_gradients + col * valid_points_num; double *pg_tmp1 = pg_tmp0 + 6 * valid_points_num; double *pg_tmp2 = pg_tmp1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { double pg0 = pg_tmp0[i]; double pg1 = pg_tmp1[i]; double pg2 = pg_tmp2[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; cov_dxd_pi_tmp[j] = icov0[vid] * pg0 + icov1[vid] * pg1 + icov2[vid] * pg2; } } } } /* First step to compute hessian list for input points */ __global__ void computeHessianListS0(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22, double *point_gradients, double *tmp_hessian, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int col = blockIdx.y; if (col < 6) { double *tmp_pg0 = point_gradients + col * valid_points_num; double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num; double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num; double *tmp_h = tmp_hessian + col * valid_voxel_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double pg0 = tmp_pg0[i]; double pg1 = tmp_pg1[i]; double pg2 = tmp_pg2[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; tmp_h[j] = (d_x - centroid_x[vid]) * (icov00[vid] * pg0 + icov01[vid] * pg1 + icov02[vid] * pg2) + (d_y - centroid_y[vid]) * (icov10[vid] * pg0 + icov11[vid] * pg1 + icov12[vid] * pg2) + (d_z - centroid_z[vid]) * (icov20[vid] * pg0 + 
icov21[vid] * pg1 + icov22[vid] * pg2); } } } } /* Fourth step to compute hessian list */ __global__ void computeHessianListS1(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double gauss_d1, double gauss_d2, double *hessians, double *e_x_cov_x, double *tmp_hessian, double *cov_dxd_pi, double *point_gradients, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 6 && col < 6) { double *cov_dxd_pi_mat0 = cov_dxd_pi + row * valid_voxel_num; double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num; double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num; double *tmp_h = tmp_hessian + col * valid_voxel_num; double *h = hessians + (row * 6 + col) * valid_points_num; double *tmp_pg0 = point_gradients + col * valid_points_num; double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num; double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double pg0 = tmp_pg0[i]; double pg1 = tmp_pg1[i]; double pg2 = tmp_pg2[i]; double final_hessian = 0.0; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { //Transformed coordinates int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { double cov_dxd0 = cov_dxd_pi_mat0[j]; double cov_dxd1 = cov_dxd_pi_mat1[j]; double cov_dxd2 = cov_dxd_pi_mat2[j]; tmp_ex *= gauss_d1; final_hessian += -gauss_d2 * ((d_x - centroid_x[vid]) * cov_dxd0 + (d_y - centroid_y[vid]) * cov_dxd1 + (d_z - centroid_z[vid]) * cov_dxd2) * tmp_h[j] * tmp_ex; final_hessian += (pg0 * cov_dxd0 + pg1 * cov_dxd1 + pg2 * cov_dxd2) * tmp_ex; } } h[i] = final_hessian; } } } __global__ void computeHessianListS2(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double gauss_d1, double *e_x_cov_x, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22, double *point_hessians, double *hessians, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 6 && col < 6) { double *h = hessians + (row * 6 + col) * valid_points_num; double *tmp_ph0 = point_hessians + ((3 * row) * 6 + col) * valid_points_num; double *tmp_ph1 = tmp_ph0 + 6 * valid_points_num; double *tmp_ph2 = tmp_ph1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double ph0 = tmp_ph0[i]; double ph1 = tmp_ph1[i]; double ph2 = tmp_ph2[i]; double final_hessian = h[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { //Transformed coordinates int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { tmp_ex *= gauss_d1; final_hessian += (d_x - centroid_x[vid]) * (icov00[vid] * ph0 + icov01[vid] * ph1 + icov02[vid] * ph2) 
* tmp_ex; final_hessian += (d_y - centroid_y[vid]) * (icov10[vid] * ph0 + icov11[vid] * ph1 + icov12[vid] * ph2) * tmp_ex; final_hessian += (d_z - centroid_z[vid]) * (icov20[vid] * ph0 + icov21[vid] * ph1 + icov22[vid] * ph2) * tmp_ex; } } h[i] = final_hessian; } } } /* Compute sum of a list of matrices */ __global__ void matrixSum(double *matrix_list, int full_size, int half_size, int rows, int cols, int offset) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; for (int i = index; i < half_size && row < rows && col < cols; i += stride) { MatrixDevice left(rows, cols, offset, matrix_list + i); double *right_ptr = (i + half_size < full_size) ? matrix_list + i + half_size : NULL; MatrixDevice right(rows, cols, offset, right_ptr); if (right_ptr != NULL) { left(row, col) += right(row, col); } } } /* Compute sum of score_inc list */ __global__ void sumScore(double *score, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) { score[i] += (i + half_size < full_size) ? score[i + half_size] : 0; } } double GNormalDistributionsTransform::computeDerivatives(Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> pose, bool compute_hessian) { MatrixHost p(6, 1); for (int i = 0; i < 6; i++) { p(i) = pose(i, 0); } score_gradient.setZero (); hessian.setZero (); //Compute Angle Derivatives computeAngleDerivatives(p); //Radius Search int *valid_points, *voxel_id, *starting_voxel_id; int valid_voxel_num, valid_points_num; valid_points = voxel_id = starting_voxel_id = NULL; voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num); double *covariance = voxel_grid_.getCovarianceList(); double *inverse_covariance = voxel_grid_.getInverseCovarianceList(); double *centroid = voxel_grid_.getCentroidList(); int *points_per_voxel = voxel_grid_.getPointsPerVoxelList(); int voxel_num = voxel_grid_.getVoxelNum(); if (valid_points_num == 0) return 0; //Update score gradient and hessian matrix double *gradients, *hessians, *point_gradients, *point_hessians, *score; checkCudaErrors(hipMalloc(&gradients, sizeof(double) * valid_points_num * 6)); checkCudaErrors(hipMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(hipMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(hipMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6)); checkCudaErrors(hipMalloc(&score, sizeof(double) * valid_points_num)); checkCudaErrors(hipMemset(gradients, 0, sizeof(double) * valid_points_num * 6)); checkCudaErrors(hipMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(hipMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(hipMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6)); int block_x = (valid_points_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : valid_points_num; int grid_x = (valid_points_num - 1) / block_x + 1; dim3 grid; hipLaunchKernelGGL(( computePointGradients0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients, point_gradients + valid_points_num * 7, point_gradients + valid_points_num * 14, point_gradients + valid_points_num * 9, point_gradients + valid_points_num * 15, point_gradients + valid_points_num * 4, point_gradients + valid_points_num * 10); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointGradients1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients + valid_points_num * 16, point_gradients + valid_points_num * 5, point_gradients + valid_points_num * 11, point_gradients + valid_points_num * 17); checkCudaErrors(hipGetLastError()); if (compute_hessian) { hipLaunchKernelGGL(( computePointHessian0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69, point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81, point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70, point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99, point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88, point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100, point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian2), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107); checkCudaErrors(hipGetLastError()); } checkCudaErrors(hipDeviceSynchronize()); double *tmp_hessian; checkCudaErrors(hipMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6)); double *e_x_cov_x; checkCudaErrors(hipMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num)); double *cov_dxd_pi; checkCudaErrors(hipMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6)); hipLaunchKernelGGL(( computeExCovX), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( 
computeScoreList), dim3(grid_x), dim3(block_x), 0, 0, starting_voxel_id, voxel_id, valid_points_num, e_x_cov_x, gauss_d1_, score); checkCudaErrors(hipGetLastError()); int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num; int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1; hipLaunchKernelGGL(( updateExCovX), dim3(grid_x2), dim3(block_x2), 0, 0, e_x_cov_x, gauss_d2_, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.x = grid_x; grid.y = 3; grid.z = 6; hipLaunchKernelGGL(( computeCovDxdPi), dim3(grid), dim3(block_x), 0, 0, valid_points, starting_voxel_id, voxel_id, valid_points_num, inverse_covariance, voxel_num, gauss_d1_, gauss_d2_, point_gradients, cov_dxd_pi, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.x = grid_x; grid.y = 6; grid.z = 1; hipLaunchKernelGGL(( computeScoreGradientList), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, voxel_num, e_x_cov_x, cov_dxd_pi, gauss_d1_, valid_voxel_num, gradients); checkCudaErrors(hipGetLastError()); if (compute_hessian) { grid.y = 6; grid.z = 1; hipLaunchKernelGGL(( computeHessianListS0), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_gradients, tmp_hessian, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.z = 6; hipLaunchKernelGGL(( computeHessianListS1), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, hessians, e_x_cov_x, tmp_hessian, cov_dxd_pi, point_gradients, valid_voxel_num); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computeHessianListS2), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_hessians, hessians, valid_voxel_num); checkCudaErrors(hipGetLastError()); } int full_size = valid_points_num; int half_size = (full_size - 1) / 2 + 1; while (full_size > 1) { block_x = (half_size > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : half_size; grid_x = (half_size - 1) / block_x + 1; grid.x = grid_x; grid.y = 1; grid.z = 6; hipLaunchKernelGGL(( matrixSum), dim3(grid), dim3(block_x), 0, 0, gradients, full_size, half_size, 1, 6, valid_points_num); checkCudaErrors(hipGetLastError()); grid.y = 6; hipLaunchKernelGGL(( matrixSum), dim3(grid), dim3(block_x), 0, 0, hessians, full_size, half_size, 6, 6, valid_points_num); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( sumScore), dim3(grid_x), dim3(block_x), 0, 0, score, full_size, half_size); checkCudaErrors(hipGetLastError()); full_size = half_size; half_size = (full_size - 1) / 2 + 1; } checkCudaErrors(hipDeviceSynchronize()); MatrixDevice dgrad(1, 6, valid_points_num, gradients), dhess(6, 6, valid_points_num, hessians); MatrixHost hgrad(1, 6), hhess(6, 6); hgrad.moveToHost(dgrad); hhess.moveToHost(dhess); for (int i = 0; i < 6; i++) { score_gradient(i) = hgrad(i); } for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { hessian(i, j) = hhess(i, j); } } double score_inc; checkCudaErrors(hipMemcpy(&score_inc, score, sizeof(double), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(gradients)); checkCudaErrors(hipFree(hessians)); checkCudaErrors(hipFree(point_hessians)); checkCudaErrors(hipFree(point_gradients)); checkCudaErrors(hipFree(score)); checkCudaErrors(hipFree(tmp_hessian)); checkCudaErrors(hipFree(e_x_cov_x)); checkCudaErrors(hipFree(cov_dxd_pi)); if (valid_points != NULL) checkCudaErrors(hipFree(valid_points)); if (voxel_id != NULL) checkCudaErrors(hipFree(voxel_id)); if (starting_voxel_id != NULL) checkCudaErrors(hipFree(starting_voxel_id)); return score_inc; } void GNormalDistributionsTransform::computeAngleDerivatives(MatrixHost pose, bool compute_hessian) { double cx, cy, cz, sx, sy, sz; if (fabs(pose(3)) < 10e-5) { cx = 1.0; sx = 0.0; } else { cx = cos(pose(3)); sx = sin(pose(3)); } if (fabs(pose(4)) < 10e-5) { cy = 1.0; sy = 0.0; } else { cy = cos(pose(4)); sy = sin(pose(4)); } if (fabs(pose(5)) < 10e-5) { cz = 1.0; sz = 0.0; } else { cz = cos(pose(5)); sz = sin(pose(5)); } j_ang_(0) = -sx * sz + cx * sy * cz; j_ang_(1) = -sx * cz - cx * sy * sz; j_ang_(2) = -cx * cy; j_ang_(3) = cx * sz + sx * sy * cz; j_ang_(4) = cx * cz - sx * sy * sz; j_ang_(5) = -sx * cy; j_ang_(6) = -sy * cz; j_ang_(7) = sy * sz; j_ang_(8) = cy; j_ang_(9) = sx * cy * cz; j_ang_(10) = -sx * cy * sz; j_ang_(11) = sx * sy; j_ang_(12) = -cx * cy * cz; j_ang_(13) = cx * cy * sz; j_ang_(14) = -cx * sy; j_ang_(15) = -cy * sz; j_ang_(16) = -cy * cz; j_ang_(17) = 0; j_ang_(18) = cx * cz - sx * sy * sz; j_ang_(19) = -cx * sz - sx * sy * cz; j_ang_(20) = 0; j_ang_(21) = sx * cz + cx * sy * sz; j_ang_(22) = cx * sy * cz - sx * sz; j_ang_(23) = 0; j_ang_.moveToGpu(dj_ang_); if (compute_hessian) { h_ang_(0) = -cx * sz - sx * sy * cz; h_ang_(1) = -cx * cz + sx * sy * sz; h_ang_(2) = sx * cy; h_ang_(3) = -sx * sz + cx * sy * cz; h_ang_(4) = -cx * sy * sz - sx * cz; h_ang_(5) = -cx * cy; h_ang_(6) = cx * cy * cz; h_ang_(7) = -cx * cy * sz; h_ang_(8) = cx * sy; h_ang_(9) = sx * cy * cz; h_ang_(10) = -sx * cy * sz; h_ang_(11) = sx * sy; h_ang_(12) = -sx * cz - cx * sy * sz; h_ang_(13) = sx * sz - cx * sy * cz; h_ang_(14) = 0; h_ang_(15) = cx * cz - sx * sy * sz; h_ang_(16) = -sx * sy * cz - cx * sz; h_ang_(17) = 0; h_ang_(18) = -cy * cz; h_ang_(19) = cy * sz; h_ang_(20) = sy; h_ang_(21) = -sx * sy * cz; h_ang_(22) = sx * sy * sz; h_ang_(23) = sx * cy; h_ang_(24) = cx * sy * cz; h_ang_(25) = -cx * sy * sz; h_ang_(26) = -cx * cy; h_ang_(27) = sy * sz; h_ang_(28) = sy * cz; 
h_ang_(29) = 0; h_ang_(30) = -sx * cy * sz; h_ang_(31) = -sx * cy * cz; h_ang_(32) = 0; h_ang_(33) = cx * cy * sz; h_ang_(34) = cx * cy * cz; h_ang_(35) = 0; h_ang_(36) = -cy * cz; h_ang_(37) = cy * sz; h_ang_(38) = 0; h_ang_(39) = -cx * sz - sx * sy * cz; h_ang_(40) = -cx * cz + sx * sy * sz; h_ang_(41) = 0; h_ang_(42) = -sx * sz + cx * sy * cz; h_ang_(43) = -cx * sy * sz - sx * cz; h_ang_(44) = 0; h_ang_.moveToGpu(dh_ang_); } } __global__ void gpuTransform(float *in_x, float *in_y, float *in_z, float *trans_x, float *trans_y, float *trans_z, int point_num, MatrixDevice transform) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; float x, y, z; for (int i = idx; i < point_num; i += stride) { x = in_x[i]; y = in_y[i]; z = in_z[i]; trans_x[i] = transform(0, 0) * x + transform(0, 1) * y + transform(0, 2) * z + transform(0, 3); trans_y[i] = transform(1, 0) * x + transform(1, 1) * y + transform(1, 2) * z + transform(1, 3); trans_z[i] = transform(2, 0) * x + transform(2, 1) * y + transform(2, 2) * z + transform(2, 3); } } void GNormalDistributionsTransform::transformPointCloud(float *in_x, float *in_y, float *in_z, float *trans_x, float *trans_y, float *trans_z, int points_number, Eigen::Matrix<float, 4, 4> transform) { Eigen::Transform<float, 3, Eigen::Affine> t(transform); MatrixHost htrans(3, 4); MatrixDevice dtrans(3, 4); dtrans.memAlloc(); for (int i = 0; i < 3; i++) { for (int j = 0; j < 4; j++) { htrans(i, j) = t(i, j); } } htrans.moveToGpu(dtrans); if (points_number > 0) { int block_x = (points_number <= BLOCK_SIZE_X) ? points_number : BLOCK_SIZE_X; int grid_x = (points_number - 1) / block_x + 1; hipLaunchKernelGGL(( gpuTransform), dim3(grid_x), dim3(block_x) , 0, 0, in_x, in_y, in_z, trans_x, trans_y, trans_z, points_number, dtrans); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); } dtrans.memFree(); } double GNormalDistributionsTransform::computeStepLengthMT(const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix<double, 6, 1> &step_dir, double step_init, double step_max, double step_min, double &score, Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num) { double phi_0 = -score; double d_phi_0 = -(score_gradient.dot(step_dir)); Eigen::Matrix<double, 6, 1> x_t; if (d_phi_0 >= 0) { if (d_phi_0 == 0) return 0; else { d_phi_0 *= -1; step_dir *= -1; } } int max_step_iterations = 10; int step_iterations = 0; double mu = 1.e-4; double nu = 0.9; double a_l = 0, a_u = 0; double f_l = auxilaryFunction_PsiMT(a_l, phi_0, phi_0, d_phi_0, mu); double g_l = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu); double f_u = auxilaryFunction_PsiMT(a_u, phi_0, phi_0, d_phi_0, mu); double g_u = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu); bool interval_converged = (step_max - step_min) > 0, open_interval = true; double a_t = step_init; a_t = ::min(a_t, step_max); a_t = ::max(a_t, step_min); x_t = x + step_dir * a_t; Eigen::Translation<float, 3> translation(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))); Eigen::AngleAxis<float> tmp1(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()); Eigen::AngleAxis<float> tmp2(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()); Eigen::AngleAxis<float> tmp3(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ()); Eigen::AngleAxis<float> tmp4(tmp1 * tmp2 * tmp3); final_transformation_ = (translation * tmp4).matrix(); transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, 
points_num, final_transformation_); score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t); double phi_t = -score; double d_phi_t = -(score_gradient.dot(step_dir)); double psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu); double d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu); while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 && d_phi_t <= -nu * d_phi_0)) { if (open_interval) { a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t); } else { a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t); } a_t = (a_t < step_max) ? a_t : step_max; a_t = (a_t > step_min) ? a_t : step_min; x_t = x + step_dir * a_t; translation = Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))); tmp1 = Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()); tmp2 = Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()); tmp3 = Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ()); tmp4 = tmp1 * tmp2 * tmp3; final_transformation_ = (translation * tmp4).matrix(); transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_); score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, false); phi_t -= score; d_phi_t -= (score_gradient.dot(step_dir)); psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu); d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu); if (open_interval && (psi_t <= 0 && d_psi_t >= 0)) { open_interval = false; f_l += phi_0 - mu * d_phi_0 * a_l; g_l += mu * d_phi_0; f_u += phi_0 - mu * d_phi_0 * a_u; g_u += mu * d_phi_0; } if (open_interval) { interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t); } else { interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t); } step_iterations++; } if (step_iterations) { computeHessian(hessian, trans_x, trans_y, trans_z, points_num, x_t); } real_iterations_ += step_iterations; return a_t; } //Copied from ndt.hpp double GNormalDistributionsTransform::trialValueSelectionMT (double a_l, double f_l, double g_l, double a_u, double f_u, double g_u, double a_t, double f_t, double g_t) { // Case 1 in Trial Value Selection [More, Thuente 1994] if (f_t > f_l) { // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l; double w = std::sqrt (z * z - g_t * g_l); // Equation 2.4.56 [Sun, Yuan 2006] double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w); // Calculate the minimizer of the quadratic that interpolates f_l, f_t and g_l // Equation 2.4.2 [Sun, Yuan 2006] double a_q = a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t)); if (std::fabs (a_c - a_l) < std::fabs (a_q - a_l)) return (a_c); else return (0.5 * (a_q + a_c)); } // Case 2 in Trial Value Selection [More, Thuente 1994] else if (g_t * g_l < 0) { // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l; double w = std::sqrt (z * z - g_t * g_l); // Equation 2.4.56 [Sun, Yuan 2006] double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w); // Calculate the minimizer of the quadratic that interpolates f_l, g_l and g_t // Equation 2.4.5 [Sun, 
Yuan 2006] double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l; if (std::fabs (a_c - a_t) >= std::fabs (a_s - a_t)) return (a_c); else return (a_s); } // Case 3 in Trial Value Selection [More, Thuente 1994] else if (std::fabs (g_t) <= std::fabs (g_l)) { // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l; double w = std::sqrt (z * z - g_t * g_l); double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w); // Calculate the minimizer of the quadratic that interpolates g_l and g_t // Equation 2.4.5 [Sun, Yuan 2006] double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l; double a_t_next; if (std::fabs (a_c - a_t) < std::fabs (a_s - a_t)) a_t_next = a_c; else a_t_next = a_s; if (a_t > a_l) return (std::min (a_t + 0.66 * (a_u - a_t), a_t_next)); else return (std::max (a_t + 0.66 * (a_u - a_t), a_t_next)); } // Case 4 in Trial Value Selection [More, Thuente 1994] else { // Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u; double w = std::sqrt (z * z - g_t * g_u); // Equation 2.4.56 [Sun, Yuan 2006] return (a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w)); } } //Copied from ndt.hpp double GNormalDistributionsTransform::updateIntervalMT (double &a_l, double &f_l, double &g_l, double &a_u, double &f_u, double &g_u, double a_t, double f_t, double g_t) { // Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente 1994] if (f_t > f_l) { a_u = a_t; f_u = f_t; g_u = g_t; return (false); } // Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente 1994] else if (g_t * (a_l - a_t) > 0) { a_l = a_t; f_l = f_t; g_l = g_t; return (false); } // Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente 1994] else if (g_t * (a_l - a_t) < 0) { a_u = a_l; f_u = f_l; g_u = g_l; a_l = a_t; f_l = f_t; g_l = g_t; return (false); } // Interval Converged else return (true); } void GNormalDistributionsTransform::computeHessian(Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> &p) { int *valid_points, *voxel_id, *starting_voxel_id; int valid_voxel_num, valid_points_num; //Radius Search voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num); double *centroid = voxel_grid_.getCentroidList(); double *covariance = voxel_grid_.getCovarianceList(); double *inverse_covariance = voxel_grid_.getInverseCovarianceList(); int *points_per_voxel = voxel_grid_.getPointsPerVoxelList(); int voxel_num = voxel_grid_.getVoxelNum(); if (valid_points_num <= 0) return; //Update score gradient and hessian matrix double *hessians, *point_gradients, *point_hessians; checkCudaErrors(hipMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(hipMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(hipMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6)); checkCudaErrors(hipMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(hipMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(hipMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6)); int block_x = (valid_points_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : valid_points_num; int grid_x = (valid_points_num - 1) / block_x + 1; dim3 grid; hipLaunchKernelGGL(( computePointGradients0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients, point_gradients + valid_points_num * 7, point_gradients + valid_points_num * 14, point_gradients + valid_points_num * 9, point_gradients + valid_points_num * 15, point_gradients + valid_points_num * 4, point_gradients + valid_points_num * 10); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointGradients1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients + valid_points_num * 16, point_gradients + valid_points_num * 5, point_gradients + valid_points_num * 11, point_gradients + valid_points_num * 17); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian0), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69, point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81, point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70, point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99, point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian1), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88, point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100, point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computePointHessian2), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107); checkCudaErrors(hipGetLastError()); double *tmp_hessian; checkCudaErrors(hipMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6)); double *e_x_cov_x; checkCudaErrors(hipMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num)); double *cov_dxd_pi; checkCudaErrors(hipMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6)); hipLaunchKernelGGL(( computeExCovX), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num); checkCudaErrors(hipGetLastError()); grid.x = grid_x; grid.y = 3; grid.z = 6; hipLaunchKernelGGL(( computeCovDxdPi), 
dim3(grid), dim3(block_x), 0, 0, valid_points, starting_voxel_id, voxel_id, valid_points_num, inverse_covariance, voxel_num, gauss_d1_, gauss_d2_, point_gradients, cov_dxd_pi, valid_voxel_num); checkCudaErrors(hipGetLastError()); int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num; int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1; hipLaunchKernelGGL(( updateExCovX), dim3(grid_x2), dim3(block_x2), 0, 0, e_x_cov_x, gauss_d2_, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.y = 6; grid.z = 1; hipLaunchKernelGGL(( computeHessianListS0), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_gradients, tmp_hessian, valid_voxel_num); checkCudaErrors(hipGetLastError()); grid.z = 6; hipLaunchKernelGGL(( computeHessianListS1), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, hessians, e_x_cov_x, tmp_hessian, cov_dxd_pi, point_gradients, valid_voxel_num); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( computeHessianListS2), dim3(grid), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_hessians, hessians, valid_voxel_num); checkCudaErrors(hipGetLastError()); int full_size = valid_points_num; int half_size = (full_size - 1) / 2 + 1; while (full_size > 1) { block_x = (half_size > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : half_size; grid_x = (half_size - 1) / block_x + 1; grid.x = grid_x; grid.y = 6; grid.z = 6; hipLaunchKernelGGL(( matrixSum), dim3(grid), dim3(block_x), 0, 0, hessians, full_size, half_size, 6, 6, valid_points_num); full_size = half_size; half_size = (full_size - 1) / 2 + 1; } checkCudaErrors(hipDeviceSynchronize()); MatrixDevice dhessian(6, 6, valid_points_num, hessians); MatrixHost hhessian(6, 6); hhessian.moveToHost(dhessian); for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { hessian(i, j) = hhessian(i, j); } } checkCudaErrors(hipFree(hessians)); checkCudaErrors(hipFree(point_hessians)); checkCudaErrors(hipFree(point_gradients)); checkCudaErrors(hipFree(tmp_hessian)); checkCudaErrors(hipFree(e_x_cov_x)); checkCudaErrors(hipFree(cov_dxd_pi)); if (valid_points != NULL) { checkCudaErrors(hipFree(valid_points)); } if (voxel_id != NULL) { checkCudaErrors(hipFree(voxel_id)); } if (starting_voxel_id != NULL) { checkCudaErrors(hipFree(starting_voxel_id)); } dhessian.memFree(); } template <typename T> __global__ void gpuSum(T *input, int size, int half_size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = idx; i < half_size; i += stride) { if (i + half_size < size) { input[i] += (half_size < size) ? input[i + half_size] : 0; } } } double GNormalDistributionsTransform::getFitnessScore(double max_range) { double fitness_score = 0.0; float *trans_x, *trans_y, *trans_z; checkCudaErrors(hipMalloc(&trans_x, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&trans_y, sizeof(float) * points_number_)); checkCudaErrors(hipMalloc(&trans_z, sizeof(float) * points_number_)); transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_number_, final_transformation_); int *valid_distance; checkCudaErrors(hipMalloc(&valid_distance, sizeof(int) * points_number_)); double *min_distance; checkCudaErrors(hipMalloc(&min_distance, sizeof(double) * points_number_)); voxel_grid_.nearestNeighborSearch(trans_x, trans_y, trans_z, points_number_, valid_distance, min_distance, max_range); int size = points_number_; int half_size; while (size > 1) { half_size = (size - 1) / 2 + 1; int block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size; int grid_x = (half_size - 1) / block_x + 1; hipLaunchKernelGGL(( gpuSum<double>), dim3(grid_x), dim3(block_x), 0, 0, min_distance, size, half_size); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( gpuSum<int>), dim3(grid_x), dim3(block_x), 0, 0, valid_distance, size, half_size); checkCudaErrors(hipGetLastError()); size = half_size; } checkCudaErrors(hipDeviceSynchronize()); int nr; checkCudaErrors(hipMemcpy(&nr, valid_distance, sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fitness_score, min_distance, sizeof(double), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(trans_x)); checkCudaErrors(hipFree(trans_y)); checkCudaErrors(hipFree(trans_z)); checkCudaErrors(hipFree(valid_distance)); checkCudaErrors(hipFree(min_distance)); if (nr > 0) return (fitness_score / nr); return DBL_MAX; } }
7cfabe4b16f836ea749043d424caedcda886327d.cu
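/* GPU implementation of Normal Distributions Transform (NDT) scan matching:
 * computeDerivatives() evaluates the NDT score, gradient and Hessian on the GPU,
 * computeStepLengthMT() selects the step length with a More-Thuente line search
 * over the 6-DoF pose update, and the per-point partial results are folded
 * together by the pairwise-halving reduction kernels (sumScore, matrixSum, gpuSum). */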
#include "fast_pcl/ndt_gpu/NormalDistributionsTransform.h" #include "fast_pcl/ndt_gpu/debug.h" #include <cmath> #include <iostream> #include <pcl/common/transforms.h> namespace gpu { GNormalDistributionsTransform::GNormalDistributionsTransform() { //GRegistration::GRegistration(); gauss_d1_ = gauss_d2_ = 0; outlier_ratio_ = 0.55; step_size_ = 0.1; resolution_ = 1.0f; trans_probability_ = 0; double gauss_c1, gauss_c2, gauss_d3; // Initializes the guassian fitting parameters (eq. 6.8) [Magnusson 2009] gauss_c1 = 10.0 * (1 - outlier_ratio_); gauss_c2 = outlier_ratio_ / pow (resolution_, 3); gauss_d3 = -log (gauss_c2); gauss_d1_ = -log ( gauss_c1 + gauss_c2 ) - gauss_d3; gauss_d2_ = -2 * log ((-log ( gauss_c1 * exp ( -0.5 ) + gauss_c2 ) - gauss_d3) / gauss_d1_); transformation_epsilon_ = 0.1; max_iterations_ = 35; j_ang_ = MatrixHost(24, 1); h_ang_ = MatrixHost(45, 1); dj_ang_ = MatrixDevice(24, 1); dh_ang_ = MatrixDevice(45, 1); real_iterations_ = 0; } GNormalDistributionsTransform::GNormalDistributionsTransform(const GNormalDistributionsTransform &other) { gauss_d1_ = other.gauss_d1_; gauss_d2_ = other.gauss_d2_; outlier_ratio_ = other.outlier_ratio_; j_ang_ = other.j_ang_; h_ang_ = other.h_ang_; dj_ang_ = other.dj_ang_; dh_ang_ = other.dh_ang_; step_size_ = other.step_size_; resolution_ = other.resolution_; trans_probability_ = other.trans_probability_; real_iterations_ = other.real_iterations_; voxel_grid_ = other.voxel_grid_; } GNormalDistributionsTransform::~GNormalDistributionsTransform() { dj_ang_.memFree(); dh_ang_.memFree(); } void GNormalDistributionsTransform::setStepSize(double step_size) { step_size_ = step_size; } void GNormalDistributionsTransform::setResolution(float resolution) { resolution_ = resolution; } void GNormalDistributionsTransform::setOutlierRatio(double olr) { outlier_ratio_ = olr; } double GNormalDistributionsTransform::getStepSize() const { return step_size_; } float GNormalDistributionsTransform::getResolution() const { return resolution_; } double GNormalDistributionsTransform::getOutlierRatio() const { return outlier_ratio_; } double GNormalDistributionsTransform::getTransformationProbability() const { return trans_probability_; } int GNormalDistributionsTransform::getRealIterations() { return real_iterations_; } double GNormalDistributionsTransform::auxilaryFunction_PsiMT(double a, double f_a, double f_0, double g_0, double mu) { return (f_a - f_0 - mu * g_0 * a); } double GNormalDistributionsTransform::auxilaryFunction_dPsiMT(double g_a, double g_0, double mu) { return (g_a - mu * g_0); } void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZI>::Ptr input) { // Copy input map data from the host memory to the GPU memory GRegistration::setInputTarget(input); // Build the voxel grid if (target_points_number_ != 0) { voxel_grid_.setLeafSize(resolution_, resolution_, resolution_); voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_); } } void GNormalDistributionsTransform::setInputTarget(pcl::PointCloud<pcl::PointXYZ>::Ptr input) { // Copy input map data from the host memory to the GPU memory GRegistration::setInputTarget(input); // Build the voxel grid if (target_points_number_ != 0) { voxel_grid_.setLeafSize(resolution_, resolution_, resolution_); voxel_grid_.setInput(target_x_, target_y_, target_z_, target_points_number_); } } void GNormalDistributionsTransform::computeTransformation(const Eigen::Matrix<float, 4, 4> &guess) { if (dj_ang_.isEmpty()) { dj_ang_.memAlloc(); } if (dh_ang_.isEmpty()) { 
dh_ang_.memAlloc(); } nr_iterations_ = 0; converged_ = false; double gauss_c1, gauss_c2, gauss_d3; gauss_c1 = 10 * ( 1 - outlier_ratio_); gauss_c2 = outlier_ratio_ / pow(resolution_, 3); gauss_d3 = - log(gauss_c2); gauss_d1_ = -log(gauss_c1 + gauss_c2) - gauss_d3; gauss_d2_ = -2 * log((-log(gauss_c1 * exp(-0.5) + gauss_c2) - gauss_d3) / gauss_d1_); if (guess != Eigen::Matrix4f::Identity()) { final_transformation_ = guess; transformPointCloud(x_, y_, z_, trans_x_, trans_y_, trans_z_, points_number_, guess); } Eigen::Transform<float, 3, Eigen::Affine, Eigen::ColMajor> eig_transformation; eig_transformation.matrix() = final_transformation_; Eigen::Matrix<double, 6, 1> p, delta_p, score_gradient; Eigen::Vector3f init_translation = eig_transformation.translation(); Eigen::Vector3f init_rotation = eig_transformation.rotation().eulerAngles(0, 1, 2); p << init_translation(0), init_translation(1), init_translation(2), init_rotation(0), init_rotation(1), init_rotation(2); Eigen::Matrix<double, 6, 6> hessian; double score = 0; double delta_p_norm; score = computeDerivatives(score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_, p); int loop_time = 0; while (!converged_) { previous_transformation_ = transformation_; Eigen::JacobiSVD<Eigen::Matrix<double, 6, 6>> sv(hessian, Eigen::ComputeFullU | Eigen::ComputeFullV); delta_p = sv.solve(-score_gradient); delta_p_norm = delta_p.norm(); if (delta_p_norm == 0 || delta_p_norm != delta_p_norm) { trans_probability_ = score / static_cast<double>(points_number_); converged_ = delta_p_norm == delta_p_norm; return; } delta_p.normalize(); delta_p_norm = computeStepLengthMT(p, delta_p, delta_p_norm, step_size_, transformation_epsilon_ / 2, score, score_gradient, hessian, trans_x_, trans_y_, trans_z_, points_number_); delta_p *= delta_p_norm; Eigen::Translation<float, 3> translation(static_cast<float>(delta_p(0)), static_cast<float>(delta_p(1)), static_cast<float>(delta_p(2))); Eigen::AngleAxis<float> tmp1(static_cast<float>(delta_p(3)), Eigen::Vector3f::UnitX()); Eigen::AngleAxis<float> tmp2(static_cast<float>(delta_p(4)), Eigen::Vector3f::UnitY()); Eigen::AngleAxis<float> tmp3(static_cast<float>(delta_p(5)), Eigen::Vector3f::UnitZ()); Eigen::AngleAxis<float> tmp4(tmp1 * tmp2 * tmp3); transformation_ = (translation * tmp4).matrix(); p = p + delta_p; //Not update visualizer if (nr_iterations_ > max_iterations_ || (nr_iterations_ && (std::fabs(delta_p_norm) < transformation_epsilon_))) converged_ = true; nr_iterations_++; loop_time++; } trans_probability_ = score / static_cast<double>(points_number_); } /* First step of computing point gradients */ __global__ void computePointGradients0(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dj_ang, double *pg00, double *pg11, double *pg22, double *pg13, double *pg23, double *pg04, double *pg14) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double j_ang[12]; if (threadIdx.x < 12) { j_ang[threadIdx.x] = dj_ang[threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); //Set the 3x3 block start from (0, 0) to identity matrix pg00[i] = 1; pg11[i] = 1; pg22[i] = 1; //Compute point derivatives pg13[i] = o_x * j_ang[0] + o_y * j_ang[1] + o_z * j_ang[2]; pg23[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5]; 
pg04[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8]; pg14[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11]; } } /* Second step of computing point gradients */ __global__ void computePointGradients1(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dj_ang, double *pg24, double *pg05, double *pg15, double *pg25) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double j_ang[12]; if (threadIdx.x < 12) { j_ang[threadIdx.x] = dj_ang[threadIdx.x + 12]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); //Compute point derivatives pg24[i] = o_x * j_ang[0] + o_y * j_ang[1] + o_z * j_ang[2]; pg05[i] = o_x * j_ang[3] + o_y * j_ang[4] + o_z * j_ang[5]; pg15[i] = o_x * j_ang[6] + o_y * j_ang[7] + o_z * j_ang[8]; pg25[i] = o_x * j_ang[9] + o_y * j_ang[10] + o_z * j_ang[11]; } } /* First step of computing point hessians */ __global__ void computePointHessian0(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph93, double *ph103, double *ph113, double *ph123, double *ph94, double *ph133, double *ph104, double *ph143, double *ph114, double *ph153, double *ph95, double *ph163, double *ph105, double *ph173, double *ph115) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[18]; if (threadIdx.x < 18) { h_ang[threadIdx.x] = dh_ang[threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph93[i] = 0; ph103[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph113[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph123[i] = ph94[i] = 0; ph133[i] = ph104[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; ph143[i] = ph114[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11]; ph153[i] = ph95[i] = 0; ph163[i] = ph105[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14]; ph173[i] = ph115[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17]; } } __global__ void computePointHessian1(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph124, double *ph134, double *ph144, double *ph154, double *ph125, double *ph164, double *ph135, double *ph174, double *ph145) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[18]; if (threadIdx.x < 18) { h_ang[threadIdx.x] = dh_ang[18 + threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph124[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph134[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph144[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; ph154[i] = ph125[i] = o_x * h_ang[9] + o_y * h_ang[10] + o_z * h_ang[11]; ph164[i] = ph135[i] = o_x * h_ang[12] + o_y * h_ang[13] + o_z * h_ang[14]; ph174[i] = ph145[i] = o_x * h_ang[15] + o_y * h_ang[16] + o_z * h_ang[17]; } } __global__ void 
computePointHessian2(float *x, float *y, float *z, int points_num, int *valid_points, int valid_points_num, double *dh_ang, double *ph155, double *ph165, double *ph175) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ double h_ang[9]; if (threadIdx.x < 9) { h_ang[threadIdx.x] = dh_ang[36 + threadIdx.x]; } __syncthreads(); for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; //Orignal coordinates double o_x = static_cast<double>(x[pid]); double o_y = static_cast<double>(y[pid]); double o_z = static_cast<double>(z[pid]); ph155[i] = o_x * h_ang[0] + o_y * h_ang[1] + o_z * h_ang[2]; ph165[i] = o_x * h_ang[3] + o_y * h_ang[4] + o_z * h_ang[5]; ph175[i] = o_x * h_ang[6] + o_y * h_ang[7] + o_z * h_ang[8]; } } /* compute score_inc list for input points. * The final score_inc is calculated by a reduction sum * on this score_inc list. */ __global__ void computeScoreList(int *starting_voxel_id, int *voxel_id, int valid_points_num, double *e_x_cov_x, double gauss_d1, double *score) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_points_num; i += stride) { double score_inc = 0; for (int vid = starting_voxel_id[i]; vid < starting_voxel_id[i + 1]; vid++) { double tmp_ex = e_x_cov_x[vid]; score_inc += (tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex) ? 0 : -gauss_d1 * tmp_ex; } score[i] = score_inc; } } /* First step to compute score gradient list for input points */ __global__ void computeScoreGradientList(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, int voxel_num, double *e_x_cov_x, double *cov_dxd_pi, double gauss_d1, int valid_voxel_num, double *score_gradients) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int col = blockIdx.y; if (col < 6) { double *sg = score_gradients + col * valid_points_num; double *cov_dxd_pi_mat0 = cov_dxd_pi + col * valid_voxel_num; double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num; double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double tmp_sg = 0.0; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { tmp_ex *= gauss_d1; tmp_sg += ((d_x - centroid_x[vid]) * cov_dxd_pi_mat0[j] + (d_y - centroid_y[vid]) * cov_dxd_pi_mat1[j] + (d_z - centroid_z[vid]) * cov_dxd_pi_mat2[j]) * tmp_ex; } } sg[i] = tmp_sg; } } } /* Intermediate step to compute e_x_cov_x */ __global__ void computeExCovX(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centr_x, double *centr_y, double *centr_z, double gauss_d1, double gauss_d2, double *e_x_cov_x, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = 
static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double t_x, t_y, t_z; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; t_x = d_x - centr_x[vid]; t_y = d_y - centr_y[vid]; t_z = d_z - centr_z[vid]; e_x_cov_x[j] = exp(-gauss_d2 * ((t_x * icov00[vid] + t_y * icov01[vid] + t_z * icov02[vid]) * t_x + ((t_x * icov10[vid] + t_y * icov11[vid] + t_z * icov12[vid]) * t_y) + ((t_x * icov20[vid] + t_y * icov21[vid] + t_z * icov22[vid]) * t_z)) / 2.0); } } } /* update e_x_cov_x - Reusable portion of Equation 6.12 and 6.13 [Magnusson 2009] */ __global__ void updateExCovX(double *e_x_cov_x, double gauss_d2, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = id; i < valid_voxel_num; i += stride) { e_x_cov_x[i] *= gauss_d2; } } /* compute cov_dxd_pi as reusable portion of Equation 6.12 and 6.13 [Magnusson 2009]*/ __global__ void computeCovDxdPi(int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *inverse_covariance, int voxel_num, double gauss_d1, double gauss_d2, double *point_gradients, double *cov_dxd_pi, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 3 && col < 6) { double *icov0 = inverse_covariance + row * 3 * voxel_num; double *icov1 = icov0 + voxel_num; double *icov2 = icov1 + voxel_num; double *cov_dxd_pi_tmp = cov_dxd_pi + (row * 6 + col) * valid_voxel_num; double *pg_tmp0 = point_gradients + col * valid_points_num; double *pg_tmp1 = pg_tmp0 + 6 * valid_points_num; double *pg_tmp2 = pg_tmp1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { double pg0 = pg_tmp0[i]; double pg1 = pg_tmp1[i]; double pg2 = pg_tmp2[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; cov_dxd_pi_tmp[j] = icov0[vid] * pg0 + icov1[vid] * pg1 + icov2[vid] * pg2; } } } } /* First step to compute hessian list for input points */ __global__ void computeHessianListS0(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22, double *point_gradients, double *tmp_hessian, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int col = blockIdx.y; if (col < 6) { double *tmp_pg0 = point_gradients + col * valid_points_num; double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num; double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num; double *tmp_h = tmp_hessian + col * valid_voxel_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double pg0 = tmp_pg0[i]; double pg1 = tmp_pg1[i]; double pg2 = tmp_pg2[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { int vid = voxel_id[j]; tmp_h[j] = (d_x - centroid_x[vid]) * (icov00[vid] * pg0 + icov01[vid] * pg1 + icov02[vid] * pg2) + (d_y - centroid_y[vid]) * (icov10[vid] * pg0 + icov11[vid] * pg1 + icov12[vid] * pg2) + (d_z - centroid_z[vid]) * (icov20[vid] * pg0 + icov21[vid] * pg1 + icov22[vid] * pg2); } } } } /* 
Fourth step to compute hessian list */ __global__ void computeHessianListS1(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double gauss_d1, double gauss_d2, double *hessians, double *e_x_cov_x, double *tmp_hessian, double *cov_dxd_pi, double *point_gradients, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 6 && col < 6) { double *cov_dxd_pi_mat0 = cov_dxd_pi + row * valid_voxel_num; double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num; double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num; double *tmp_h = tmp_hessian + col * valid_voxel_num; double *h = hessians + (row * 6 + col) * valid_points_num; double *tmp_pg0 = point_gradients + col * valid_points_num; double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num; double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double pg0 = tmp_pg0[i]; double pg1 = tmp_pg1[i]; double pg2 = tmp_pg2[i]; double final_hessian = 0.0; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { //Transformed coordinates int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { double cov_dxd0 = cov_dxd_pi_mat0[j]; double cov_dxd1 = cov_dxd_pi_mat1[j]; double cov_dxd2 = cov_dxd_pi_mat2[j]; tmp_ex *= gauss_d1; final_hessian += -gauss_d2 * ((d_x - centroid_x[vid]) * cov_dxd0 + (d_y - centroid_y[vid]) * cov_dxd1 + (d_z - centroid_z[vid]) * cov_dxd2) * tmp_h[j] * tmp_ex; final_hessian += (pg0 * cov_dxd0 + pg1 * cov_dxd1 + pg2 * cov_dxd2) * tmp_ex; } } h[i] = final_hessian; } } } __global__ void computeHessianListS2(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double gauss_d1, double *e_x_cov_x, double *icov00, double *icov01, double *icov02, double *icov10, double *icov11, double *icov12, double *icov20, double *icov21, double *icov22, double *point_hessians, double *hessians, int valid_voxel_num) { int id = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; if (row < 6 && col < 6) { double *h = hessians + (row * 6 + col) * valid_points_num; double *tmp_ph0 = point_hessians + ((3 * row) * 6 + col) * valid_points_num; double *tmp_ph1 = tmp_ph0 + 6 * valid_points_num; double *tmp_ph2 = tmp_ph1 + 6 * valid_points_num; for (int i = id; i < valid_points_num; i += stride) { int pid = valid_points[i]; double d_x = static_cast<double>(trans_x[pid]); double d_y = static_cast<double>(trans_y[pid]); double d_z = static_cast<double>(trans_z[pid]); double ph0 = tmp_ph0[i]; double ph1 = tmp_ph1[i]; double ph2 = tmp_ph2[i]; double final_hessian = h[i]; for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) { //Transformed coordinates int vid = voxel_id[j]; double tmp_ex = e_x_cov_x[j]; if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) { tmp_ex *= gauss_d1; final_hessian += (d_x - centroid_x[vid]) * (icov00[vid] * ph0 + icov01[vid] * ph1 + icov02[vid] * ph2) * tmp_ex; final_hessian += (d_y - centroid_y[vid]) 
* (icov10[vid] * ph0 + icov11[vid] * ph1 + icov12[vid] * ph2) * tmp_ex; final_hessian += (d_z - centroid_z[vid]) * (icov20[vid] * ph0 + icov21[vid] * ph1 + icov22[vid] * ph2) * tmp_ex; } } h[i] = final_hessian; } } } /* Compute sum of a list of matrices */ __global__ void matrixSum(double *matrix_list, int full_size, int half_size, int rows, int cols, int offset) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; int row = blockIdx.y; int col = blockIdx.z; for (int i = index; i < half_size && row < rows && col < cols; i += stride) { MatrixDevice left(rows, cols, offset, matrix_list + i); double *right_ptr = (i + half_size < full_size) ? matrix_list + i + half_size : NULL; MatrixDevice right(rows, cols, offset, right_ptr); if (right_ptr != NULL) { left(row, col) += right(row, col); } } } /* Compute sum of score_inc list */ __global__ void sumScore(double *score, int full_size, int half_size) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < half_size; i += stride) { score[i] += (i + half_size < full_size) ? score[i + half_size] : 0; } } double GNormalDistributionsTransform::computeDerivatives(Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num, Eigen::Matrix<double, 6, 1> pose, bool compute_hessian) { MatrixHost p(6, 1); for (int i = 0; i < 6; i++) { p(i) = pose(i, 0); } score_gradient.setZero (); hessian.setZero (); //Compute Angle Derivatives computeAngleDerivatives(p); //Radius Search int *valid_points, *voxel_id, *starting_voxel_id; int valid_voxel_num, valid_points_num; valid_points = voxel_id = starting_voxel_id = NULL; voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX, &valid_points, &starting_voxel_id, &voxel_id, &valid_voxel_num, &valid_points_num); double *covariance = voxel_grid_.getCovarianceList(); double *inverse_covariance = voxel_grid_.getInverseCovarianceList(); double *centroid = voxel_grid_.getCentroidList(); int *points_per_voxel = voxel_grid_.getPointsPerVoxelList(); int voxel_num = voxel_grid_.getVoxelNum(); if (valid_points_num == 0) return 0; //Update score gradient and hessian matrix double *gradients, *hessians, *point_gradients, *point_hessians, *score; checkCudaErrors(cudaMalloc(&gradients, sizeof(double) * valid_points_num * 6)); checkCudaErrors(cudaMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(cudaMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(cudaMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6)); checkCudaErrors(cudaMalloc(&score, sizeof(double) * valid_points_num)); checkCudaErrors(cudaMemset(gradients, 0, sizeof(double) * valid_points_num * 6)); checkCudaErrors(cudaMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6)); checkCudaErrors(cudaMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6)); checkCudaErrors(cudaMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6)); int block_x = (valid_points_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : valid_points_num; int grid_x = (valid_points_num - 1) / block_x + 1; dim3 grid; computePointGradients0<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients, point_gradients + valid_points_num * 7, point_gradients + valid_points_num * 14, point_gradients + valid_points_num * 9, point_gradients + valid_points_num * 15, point_gradients + valid_points_num * 4, point_gradients + valid_points_num * 10); checkCudaErrors(cudaGetLastError()); computePointGradients1<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dj_ang_.buffer(), point_gradients + valid_points_num * 16, point_gradients + valid_points_num * 5, point_gradients + valid_points_num * 11, point_gradients + valid_points_num * 17); checkCudaErrors(cudaGetLastError()); if (compute_hessian) { computePointHessian0<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63, point_hessians + valid_points_num * 69, point_hessians + valid_points_num * 75, point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81, point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87, point_hessians + valid_points_num * 70, point_hessians + valid_points_num * 93, point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99, point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105, point_hessians + valid_points_num * 71); checkCudaErrors(cudaGetLastError()); computePointHessian1<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82, point_hessians + valid_points_num * 88, point_hessians + valid_points_num * 94, point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100, point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106, point_hessians + valid_points_num * 89); checkCudaErrors(cudaGetLastError()); computePointHessian2<<<grid_x, block_x>>>(x_, y_, z_, points_number_, valid_points, valid_points_num, dh_ang_.buffer(), point_hessians + valid_points_num * 95, point_hessians + valid_points_num * 101, point_hessians + valid_points_num * 107); checkCudaErrors(cudaGetLastError()); } checkCudaErrors(cudaDeviceSynchronize()); double *tmp_hessian; checkCudaErrors(cudaMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6)); double *e_x_cov_x; checkCudaErrors(cudaMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num)); double *cov_dxd_pi; checkCudaErrors(cudaMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6)); computeExCovX<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num); checkCudaErrors(cudaGetLastError()); computeScoreList<<<grid_x, block_x>>>(starting_voxel_id, voxel_id, valid_points_num, e_x_cov_x, gauss_d1_, score); checkCudaErrors(cudaGetLastError()); int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : valid_voxel_num; int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1; updateExCovX<<<grid_x2, block_x2>>>(e_x_cov_x, gauss_d2_, valid_voxel_num); checkCudaErrors(cudaGetLastError()); grid.x = grid_x; grid.y = 3; grid.z = 6; computeCovDxdPi<<<grid, block_x>>>(valid_points, starting_voxel_id, voxel_id, valid_points_num, inverse_covariance, voxel_num, gauss_d1_, gauss_d2_, point_gradients, cov_dxd_pi, valid_voxel_num); checkCudaErrors(cudaGetLastError()); grid.x = grid_x; grid.y = 6; grid.z = 1; computeScoreGradientList<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, voxel_num, e_x_cov_x, cov_dxd_pi, gauss_d1_, valid_voxel_num, gradients); checkCudaErrors(cudaGetLastError()); if (compute_hessian) { grid.y = 6; grid.z = 1; computeHessianListS0<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_gradients, tmp_hessian, valid_voxel_num); checkCudaErrors(cudaGetLastError()); grid.z = 6; computeHessianListS1<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, gauss_d2_, hessians, e_x_cov_x, tmp_hessian, cov_dxd_pi, point_gradients, valid_voxel_num); checkCudaErrors(cudaGetLastError()); computeHessianListS2<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points, starting_voxel_id, voxel_id, valid_points_num, centroid, centroid + voxel_num, centroid + 2 * voxel_num, gauss_d1_, e_x_cov_x, inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num, inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num, inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num, point_hessians, hessians, valid_voxel_num); checkCudaErrors(cudaGetLastError()); } int full_size = valid_points_num; int half_size = (full_size - 1) / 2 + 1; while (full_size > 1) { block_x = (half_size > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : half_size; grid_x = (half_size - 1) / block_x + 1; grid.x = grid_x; grid.y = 1; grid.z = 6; matrixSum<<<grid, block_x>>>(gradients, full_size, half_size, 1, 6, valid_points_num); checkCudaErrors(cudaGetLastError()); grid.y = 6; matrixSum<<<grid, block_x>>>(hessians, full_size, half_size, 6, 6, valid_points_num); checkCudaErrors(cudaGetLastError()); sumScore<<<grid_x, block_x>>>(score, full_size, half_size); checkCudaErrors(cudaGetLastError()); full_size = half_size; half_size = (full_size - 1) / 2 + 1; } checkCudaErrors(cudaDeviceSynchronize()); MatrixDevice dgrad(1, 6, valid_points_num, gradients), dhess(6, 6, valid_points_num, hessians); MatrixHost hgrad(1, 6), hhess(6, 6); hgrad.moveToHost(dgrad); hhess.moveToHost(dhess); for (int i = 0; i < 6; i++) { score_gradient(i) = hgrad(i); } for (int i = 0; i < 6; i++) { for (int j = 0; j < 6; j++) { hessian(i, j) = hhess(i, j); } } double score_inc; checkCudaErrors(cudaMemcpy(&score_inc, score, sizeof(double), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(gradients)); checkCudaErrors(cudaFree(hessians)); checkCudaErrors(cudaFree(point_hessians)); checkCudaErrors(cudaFree(point_gradients)); checkCudaErrors(cudaFree(score)); checkCudaErrors(cudaFree(tmp_hessian)); checkCudaErrors(cudaFree(e_x_cov_x)); checkCudaErrors(cudaFree(cov_dxd_pi)); if (valid_points != NULL) checkCudaErrors(cudaFree(valid_points)); if (voxel_id != NULL) checkCudaErrors(cudaFree(voxel_id)); if (starting_voxel_id != NULL) checkCudaErrors(cudaFree(starting_voxel_id)); return score_inc; } void GNormalDistributionsTransform::computeAngleDerivatives(MatrixHost pose, bool compute_hessian) { double cx, cy, cz, sx, sy, sz; if (fabs(pose(3)) < 10e-5) { cx = 1.0; sx = 0.0; } else { cx = cos(pose(3)); sx = sin(pose(3)); } if (fabs(pose(4)) < 10e-5) { cy = 1.0; sy = 0.0; } else { cy = cos(pose(4)); sy = sin(pose(4)); } if (fabs(pose(5)) < 10e-5) { cz = 1.0; sz = 0.0; } else { cz = cos(pose(5)); sz = sin(pose(5)); } j_ang_(0) = -sx * sz + cx * sy * cz; j_ang_(1) = -sx * cz - cx * sy * sz; j_ang_(2) = -cx * cy; j_ang_(3) = cx * sz + sx * sy * cz; j_ang_(4) = cx * cz - sx * sy * sz; j_ang_(5) = -sx * cy; j_ang_(6) = -sy * cz; j_ang_(7) = sy * sz; j_ang_(8) = cy; j_ang_(9) = sx * cy * cz; j_ang_(10) = -sx * cy * sz; j_ang_(11) = sx * sy; j_ang_(12) = -cx * cy * cz; j_ang_(13) = cx * cy * sz; j_ang_(14) = -cx * sy; j_ang_(15) = -cy * sz; j_ang_(16) = -cy * cz; j_ang_(17) = 0; j_ang_(18) = cx * cz - sx * sy * sz; j_ang_(19) = -cx * sz - sx * sy * cz; j_ang_(20) = 0; j_ang_(21) = sx * cz + cx * sy * sz; j_ang_(22) = cx * sy * cz - sx * sz; j_ang_(23) = 0; j_ang_.moveToGpu(dj_ang_); if (compute_hessian) { h_ang_(0) = -cx * sz - sx * sy * cz; h_ang_(1) = -cx * cz + sx * sy * sz; h_ang_(2) = sx * cy; h_ang_(3) = -sx * sz + cx * sy * cz; h_ang_(4) = -cx * sy * sz - sx * cz; h_ang_(5) = -cx * cy; h_ang_(6) = cx * cy * cz; h_ang_(7) = -cx * cy * sz; h_ang_(8) = cx * sy; h_ang_(9) = sx * cy * cz; h_ang_(10) = -sx * cy * sz; h_ang_(11) = sx * sy; h_ang_(12) = -sx * cz - cx * sy * sz; h_ang_(13) = sx * sz - cx * sy * cz; h_ang_(14) = 0; h_ang_(15) = cx * cz - sx * sy * sz; h_ang_(16) = -sx * sy * cz - cx * sz; h_ang_(17) = 0; h_ang_(18) = -cy * cz; h_ang_(19) = cy * sz; h_ang_(20) = sy; h_ang_(21) = -sx * sy * cz; h_ang_(22) = sx * sy * sz; h_ang_(23) = sx * cy; h_ang_(24) = cx * sy * cz; h_ang_(25) = -cx * sy * sz; h_ang_(26) = -cx * cy; h_ang_(27) = sy * sz; h_ang_(28) = sy * cz; h_ang_(29) = 0; h_ang_(30) = -sx * cy * sz; h_ang_(31) = -sx * cy * cz; h_ang_(32) = 0; h_ang_(33) = 
cx * cy * sz; h_ang_(34) = cx * cy * cz; h_ang_(35) = 0; h_ang_(36) = -cy * cz; h_ang_(37) = cy * sz; h_ang_(38) = 0; h_ang_(39) = -cx * sz - sx * sy * cz; h_ang_(40) = -cx * cz + sx * sy * sz; h_ang_(41) = 0; h_ang_(42) = -sx * sz + cx * sy * cz; h_ang_(43) = -cx * sy * sz - sx * cz; h_ang_(44) = 0; h_ang_.moveToGpu(dh_ang_); } } __global__ void gpuTransform(float *in_x, float *in_y, float *in_z, float *trans_x, float *trans_y, float *trans_z, int point_num, MatrixDevice transform) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; float x, y, z; for (int i = idx; i < point_num; i += stride) { x = in_x[i]; y = in_y[i]; z = in_z[i]; trans_x[i] = transform(0, 0) * x + transform(0, 1) * y + transform(0, 2) * z + transform(0, 3); trans_y[i] = transform(1, 0) * x + transform(1, 1) * y + transform(1, 2) * z + transform(1, 3); trans_z[i] = transform(2, 0) * x + transform(2, 1) * y + transform(2, 2) * z + transform(2, 3); } } void GNormalDistributionsTransform::transformPointCloud(float *in_x, float *in_y, float *in_z, float *trans_x, float *trans_y, float *trans_z, int points_number, Eigen::Matrix<float, 4, 4> transform) { Eigen::Transform<float, 3, Eigen::Affine> t(transform); MatrixHost htrans(3, 4); MatrixDevice dtrans(3, 4); dtrans.memAlloc(); for (int i = 0; i < 3; i++) { for (int j = 0; j < 4; j++) { htrans(i, j) = t(i, j); } } htrans.moveToGpu(dtrans); if (points_number > 0) { int block_x = (points_number <= BLOCK_SIZE_X) ? points_number : BLOCK_SIZE_X; int grid_x = (points_number - 1) / block_x + 1; gpuTransform<<<grid_x, block_x >>>(in_x, in_y, in_z, trans_x, trans_y, trans_z, points_number, dtrans); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); } dtrans.memFree(); } double GNormalDistributionsTransform::computeStepLengthMT(const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix<double, 6, 1> &step_dir, double step_init, double step_max, double step_min, double &score, Eigen::Matrix<double, 6, 1> &score_gradient, Eigen::Matrix<double, 6, 6> &hessian, float *trans_x, float *trans_y, float *trans_z, int points_num) { double phi_0 = -score; double d_phi_0 = -(score_gradient.dot(step_dir)); Eigen::Matrix<double, 6, 1> x_t; if (d_phi_0 >= 0) { if (d_phi_0 == 0) return 0; else { d_phi_0 *= -1; step_dir *= -1; } } int max_step_iterations = 10; int step_iterations = 0; double mu = 1.e-4; double nu = 0.9; double a_l = 0, a_u = 0; double f_l = auxilaryFunction_PsiMT(a_l, phi_0, phi_0, d_phi_0, mu); double g_l = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu); double f_u = auxilaryFunction_PsiMT(a_u, phi_0, phi_0, d_phi_0, mu); double g_u = auxilaryFunction_dPsiMT(d_phi_0, d_phi_0, mu); bool interval_converged = (step_max - step_min) > 0, open_interval = true; double a_t = step_init; a_t = std::min(a_t, step_max); a_t = std::max(a_t, step_min); x_t = x + step_dir * a_t; Eigen::Translation<float, 3> translation(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))); Eigen::AngleAxis<float> tmp1(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()); Eigen::AngleAxis<float> tmp2(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()); Eigen::AngleAxis<float> tmp3(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ()); Eigen::AngleAxis<float> tmp4(tmp1 * tmp2 * tmp3); final_transformation_ = (translation * tmp4).matrix(); transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_); score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t); 
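// More-Thuente bookkeeping for the trial step a_t: phi(a_t) is the negated NDT score,
// d_phi(a_t) its directional derivative along step_dir, and psi(a) = phi(a) - phi(0) - mu * phi'(0) * a
// is the auxiliary function used while the interval is still open (auxilaryFunction_PsiMT / auxilaryFunction_dPsiMT).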
double phi_t = -score; double d_phi_t = -(score_gradient.dot(step_dir)); double psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu); double d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu); while (!interval_converged && step_iterations < max_step_iterations && !(psi_t <= 0 && d_phi_t <= -nu * d_phi_0)) { if (open_interval) { a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t); } else { a_t = trialValueSelectionMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t); } a_t = (a_t < step_max) ? a_t : step_max; a_t = (a_t > step_min) ? a_t : step_min; x_t = x + step_dir * a_t; translation = Eigen::Translation<float, 3>(static_cast<float>(x_t(0)), static_cast<float>(x_t(1)), static_cast<float>(x_t(2))); tmp1 = Eigen::AngleAxis<float>(static_cast<float>(x_t(3)), Eigen::Vector3f::UnitX()); tmp2 = Eigen::AngleAxis<float>(static_cast<float>(x_t(4)), Eigen::Vector3f::UnitY()); tmp3 = Eigen::AngleAxis<float>(static_cast<float>(x_t(5)), Eigen::Vector3f::UnitZ()); tmp4 = tmp1 * tmp2 * tmp3; final_transformation_ = (translation * tmp4).matrix(); transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_num, final_transformation_); score = computeDerivatives(score_gradient, hessian, trans_x, trans_y, trans_z, points_num, x_t, false); phi_t = -score; d_phi_t = -(score_gradient.dot(step_dir)); psi_t = auxilaryFunction_PsiMT(a_t, phi_t, phi_0, d_phi_0, mu); d_psi_t = auxilaryFunction_dPsiMT(d_phi_t, d_phi_0, mu); if (open_interval && (psi_t <= 0 && d_psi_t >= 0)) { open_interval = false; f_l += phi_0 - mu * d_phi_0 * a_l; g_l += mu * d_phi_0; f_u += phi_0 - mu * d_phi_0 * a_u; g_u += mu * d_phi_0; } if (open_interval) { interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, psi_t, d_psi_t); } else { interval_converged = updateIntervalMT(a_l, f_l, g_l, a_u, f_u, g_u, a_t, phi_t, d_phi_t); } step_iterations++; } if (step_iterations) { computeHessian(hessian, trans_x, trans_y, trans_z, points_num, x_t); } real_iterations_ += step_iterations; return a_t; } //Copied from ndt.hpp double GNormalDistributionsTransform::trialValueSelectionMT (double a_l, double f_l, double g_l, double a_u, double f_u, double g_u, double a_t, double f_t, double g_t) { // Case 1 in Trial Value Selection [More, Thuente 1994] if (f_t > f_l) { // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l; double w = std::sqrt (z * z - g_t * g_l); // Equation 2.4.56 [Sun, Yuan 2006] double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w); // Calculate the minimizer of the quadratic that interpolates f_l, f_t and g_l // Equation 2.4.2 [Sun, Yuan 2006] double a_q = a_l - 0.5 * (a_l - a_t) * g_l / (g_l - (f_l - f_t) / (a_l - a_t)); if (std::fabs (a_c - a_l) < std::fabs (a_q - a_l)) return (a_c); else return (0.5 * (a_q + a_c)); } // Case 2 in Trial Value Selection [More, Thuente 1994] else if (g_t * g_l < 0) { // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t // Equation 2.4.52 [Sun, Yuan 2006] double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l; double w = std::sqrt (z * z - g_t * g_l); // Equation 2.4.56 [Sun, Yuan 2006] double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w); // Calculate the minimizer of the quadratic that interpolates f_l, g_l and g_t // Equation 2.4.5 [Sun, Yuan 2006] double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l; if (std::fabs (a_c - a_t) >= std::fabs (a_s - a_t)) return (a_c); else
  return (a_s);
}
// Case 3 in Trial Value Selection [More, Thuente 1994]
else if (std::fabs (g_t) <= std::fabs (g_l)) {
  // Calculate the minimizer of the cubic that interpolates f_l, f_t, g_l and g_t
  // Equation 2.4.52 [Sun, Yuan 2006]
  double z = 3 * (f_t - f_l) / (a_t - a_l) - g_t - g_l;
  double w = std::sqrt (z * z - g_t * g_l);
  double a_c = a_l + (a_t - a_l) * (w - g_l - z) / (g_t - g_l + 2 * w);

  // Calculate the minimizer of the quadratic that interpolates g_l and g_t
  // Equation 2.4.5 [Sun, Yuan 2006]
  double a_s = a_l - (a_l - a_t) / (g_l - g_t) * g_l;

  double a_t_next;

  if (std::fabs (a_c - a_t) < std::fabs (a_s - a_t))
    a_t_next = a_c;
  else
    a_t_next = a_s;

  if (a_t > a_l)
    return (std::min (a_t + 0.66 * (a_u - a_t), a_t_next));
  else
    return (std::max (a_t + 0.66 * (a_u - a_t), a_t_next));
}
// Case 4 in Trial Value Selection [More, Thuente 1994]
else {
  // Calculate the minimizer of the cubic that interpolates f_u, f_t, g_u and g_t
  // Equation 2.4.52 [Sun, Yuan 2006]
  double z = 3 * (f_t - f_u) / (a_t - a_u) - g_t - g_u;
  double w = std::sqrt (z * z - g_t * g_u);
  // Equation 2.4.56 [Sun, Yuan 2006]
  return (a_u + (a_t - a_u) * (w - g_u - z) / (g_t - g_u + 2 * w));
}
}

//Copied from ndt.hpp
double GNormalDistributionsTransform::updateIntervalMT (double &a_l, double &f_l, double &g_l,
                                                        double &a_u, double &f_u, double &g_u,
                                                        double a_t, double f_t, double g_t)
{
  // Case U1 in Update Algorithm and Case a in Modified Update Algorithm [More, Thuente 1994]
  if (f_t > f_l) {
    a_u = a_t;
    f_u = f_t;
    g_u = g_t;
    return (false);
  }
  // Case U2 in Update Algorithm and Case b in Modified Update Algorithm [More, Thuente 1994]
  else if (g_t * (a_l - a_t) > 0) {
    a_l = a_t;
    f_l = f_t;
    g_l = g_t;
    return (false);
  }
  // Case U3 in Update Algorithm and Case c in Modified Update Algorithm [More, Thuente 1994]
  else if (g_t * (a_l - a_t) < 0) {
    a_u = a_l;
    f_u = f_l;
    g_u = g_l;

    a_l = a_t;
    f_l = f_t;
    g_l = g_t;
    return (false);
  }
  // Interval Converged
  else
    return (true);
}

void GNormalDistributionsTransform::computeHessian(Eigen::Matrix<double, 6, 6> &hessian,
                                                   float *trans_x, float *trans_y, float *trans_z,
                                                   int points_num, Eigen::Matrix<double, 6, 1> &p)
{
  int *valid_points, *voxel_id, *starting_voxel_id;
  int valid_voxel_num, valid_points_num;

  //Radius Search
  voxel_grid_.radiusSearch(trans_x, trans_y, trans_z, points_num, resolution_, INT_MAX,
                           &valid_points, &starting_voxel_id, &voxel_id,
                           &valid_voxel_num, &valid_points_num);

  double *centroid = voxel_grid_.getCentroidList();
  double *covariance = voxel_grid_.getCovarianceList();
  double *inverse_covariance = voxel_grid_.getInverseCovarianceList();
  int *points_per_voxel = voxel_grid_.getPointsPerVoxelList();
  int voxel_num = voxel_grid_.getVoxelNum();

  if (valid_points_num <= 0)
    return;

  //Update score gradient and hessian matrix
  double *hessians, *point_gradients, *point_hessians;

  checkCudaErrors(cudaMalloc(&hessians, sizeof(double) * valid_points_num * 6 * 6));
  checkCudaErrors(cudaMalloc(&point_gradients, sizeof(double) * valid_points_num * 3 * 6));
  checkCudaErrors(cudaMalloc(&point_hessians, sizeof(double) * valid_points_num * 18 * 6));

  checkCudaErrors(cudaMemset(hessians, 0, sizeof(double) * valid_points_num * 6 * 6));
  checkCudaErrors(cudaMemset(point_gradients, 0, sizeof(double) * valid_points_num * 3 * 6));
  checkCudaErrors(cudaMemset(point_hessians, 0, sizeof(double) * valid_points_num * 18 * 6));

  int block_x = (valid_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_points_num;
  int grid_x = (valid_points_num - 1) / block_x + 1;
  dim3 grid;

  computePointGradients0<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
                                              valid_points, valid_points_num,
                                              dj_ang_.buffer(),
                                              point_gradients,
                                              point_gradients + valid_points_num * 7,
                                              point_gradients + valid_points_num * 14,
                                              point_gradients + valid_points_num * 9,
                                              point_gradients + valid_points_num * 15,
                                              point_gradients + valid_points_num * 4,
                                              point_gradients + valid_points_num * 10);
  checkCudaErrors(cudaGetLastError());

  computePointGradients1<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
                                              valid_points, valid_points_num,
                                              dj_ang_.buffer(),
                                              point_gradients + valid_points_num * 16,
                                              point_gradients + valid_points_num * 5,
                                              point_gradients + valid_points_num * 11,
                                              point_gradients + valid_points_num * 17);
  checkCudaErrors(cudaGetLastError());

  computePointHessian0<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
                                            valid_points, valid_points_num,
                                            dh_ang_.buffer(),
                                            point_hessians + valid_points_num * 57, point_hessians + valid_points_num * 63,
                                            point_hessians + valid_points_num * 69, point_hessians + valid_points_num * 75,
                                            point_hessians + valid_points_num * 58, point_hessians + valid_points_num * 81,
                                            point_hessians + valid_points_num * 64, point_hessians + valid_points_num * 87,
                                            point_hessians + valid_points_num * 70, point_hessians + valid_points_num * 93,
                                            point_hessians + valid_points_num * 59, point_hessians + valid_points_num * 99,
                                            point_hessians + valid_points_num * 65, point_hessians + valid_points_num * 105,
                                            point_hessians + valid_points_num * 71);
  checkCudaErrors(cudaGetLastError());

  computePointHessian1<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
                                            valid_points, valid_points_num,
                                            dh_ang_.buffer(),
                                            point_hessians + valid_points_num * 76, point_hessians + valid_points_num * 82,
                                            point_hessians + valid_points_num * 88, point_hessians + valid_points_num * 94,
                                            point_hessians + valid_points_num * 77, point_hessians + valid_points_num * 100,
                                            point_hessians + valid_points_num * 83, point_hessians + valid_points_num * 106,
                                            point_hessians + valid_points_num * 89);
  checkCudaErrors(cudaGetLastError());

  computePointHessian2<<<grid_x, block_x>>>(x_, y_, z_, points_number_,
                                            valid_points, valid_points_num,
                                            dh_ang_.buffer(),
                                            point_hessians + valid_points_num * 95,
                                            point_hessians + valid_points_num * 101,
                                            point_hessians + valid_points_num * 107);
  checkCudaErrors(cudaGetLastError());

  double *tmp_hessian;
  checkCudaErrors(cudaMalloc(&tmp_hessian, sizeof(double) * valid_voxel_num * 6));

  double *e_x_cov_x;
  checkCudaErrors(cudaMalloc(&e_x_cov_x, sizeof(double) * valid_voxel_num));

  double *cov_dxd_pi;
  checkCudaErrors(cudaMalloc(&cov_dxd_pi, sizeof(double) * valid_voxel_num * 3 * 6));

  computeExCovX<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, valid_points,
                                     starting_voxel_id, voxel_id, valid_points_num,
                                     centroid, centroid + voxel_num, centroid + 2 * voxel_num,
                                     gauss_d1_, gauss_d2_,
                                     e_x_cov_x,
                                     inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
                                     inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
                                     inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num);
  checkCudaErrors(cudaGetLastError());

  grid.x = grid_x;
  grid.y = 3;
  grid.z = 6;
  computeCovDxdPi<<<grid, block_x>>>(valid_points, starting_voxel_id, voxel_id, valid_points_num,
                                     inverse_covariance, voxel_num,
                                     gauss_d1_, gauss_d2_, point_gradients,
                                     cov_dxd_pi, valid_voxel_num);
  checkCudaErrors(cudaGetLastError());

  int block_x2 = (valid_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : valid_voxel_num;
  int grid_x2 = (valid_voxel_num - 1) / block_x2 + 1;

  updateExCovX<<<grid_x2, block_x2>>>(e_x_cov_x, gauss_d2_, valid_voxel_num);
  checkCudaErrors(cudaGetLastError());

  grid.y = 6;
  grid.z = 1;

  computeHessianListS0<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
                                          starting_voxel_id, voxel_id, valid_points_num,
                                          centroid, centroid + voxel_num, centroid + 2 * voxel_num,
                                          inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
                                          inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
                                          inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
                                          point_gradients,
                                          tmp_hessian, valid_voxel_num);
  checkCudaErrors(cudaGetLastError());

  grid.z = 6;
  computeHessianListS1<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
                                          starting_voxel_id, voxel_id, valid_points_num,
                                          centroid, centroid + voxel_num, centroid + 2 * voxel_num,
                                          gauss_d1_, gauss_d2_, hessians,
                                          e_x_cov_x, tmp_hessian, cov_dxd_pi,
                                          point_gradients,
                                          valid_voxel_num);
  checkCudaErrors(cudaGetLastError());

  computeHessianListS2<<<grid, block_x>>>(trans_x, trans_y, trans_z, valid_points,
                                          starting_voxel_id, voxel_id, valid_points_num,
                                          centroid, centroid + voxel_num, centroid + 2 * voxel_num,
                                          gauss_d1_, e_x_cov_x,
                                          inverse_covariance, inverse_covariance + voxel_num, inverse_covariance + 2 * voxel_num,
                                          inverse_covariance + 3 * voxel_num, inverse_covariance + 4 * voxel_num, inverse_covariance + 5 * voxel_num,
                                          inverse_covariance + 6 * voxel_num, inverse_covariance + 7 * voxel_num, inverse_covariance + 8 * voxel_num,
                                          point_hessians,
                                          hessians, valid_voxel_num);
  checkCudaErrors(cudaGetLastError());

  int full_size = valid_points_num;
  int half_size = (full_size - 1) / 2 + 1;

  while (full_size > 1) {
    block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
    grid_x = (half_size - 1) / block_x + 1;

    grid.x = grid_x;
    grid.y = 6;
    grid.z = 6;
    matrixSum<<<grid_x, block_x>>>(hessians, full_size, half_size, 6, 6, valid_points_num);

    full_size = half_size;
    half_size = (full_size - 1) / 2 + 1;
  }

  checkCudaErrors(cudaDeviceSynchronize());

  MatrixDevice dhessian(6, 6, valid_points_num, hessians);
  MatrixHost hhessian(6, 6);

  hhessian.moveToHost(dhessian);

  for (int i = 0; i < 6; i++) {
    for (int j = 0; j < 6; j++) {
      hessian(i, j) = hhessian(i, j);
    }
  }

  checkCudaErrors(cudaFree(hessians));
  checkCudaErrors(cudaFree(point_hessians));
  checkCudaErrors(cudaFree(point_gradients));

  checkCudaErrors(cudaFree(tmp_hessian));
  checkCudaErrors(cudaFree(e_x_cov_x));
  checkCudaErrors(cudaFree(cov_dxd_pi));

  if (valid_points != NULL) {
    checkCudaErrors(cudaFree(valid_points));
  }

  if (voxel_id != NULL) {
    checkCudaErrors(cudaFree(voxel_id));
  }

  if (starting_voxel_id != NULL) {
    checkCudaErrors(cudaFree(starting_voxel_id));
  }

  dhessian.memFree();
}

template <typename T>
__global__ void gpuSum(T *input, int size, int half_size)
{
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;

  for (int i = idx; i < half_size; i += stride) {
    if (i + half_size < size) {
      input[i] += (half_size < size) ? input[i + half_size] : 0;
    }
  }
}

double GNormalDistributionsTransform::getFitnessScore(double max_range)
{
  double fitness_score = 0.0;

  float *trans_x, *trans_y, *trans_z;

  checkCudaErrors(cudaMalloc(&trans_x, sizeof(float) * points_number_));
  checkCudaErrors(cudaMalloc(&trans_y, sizeof(float) * points_number_));
  checkCudaErrors(cudaMalloc(&trans_z, sizeof(float) * points_number_));

  transformPointCloud(x_, y_, z_, trans_x, trans_y, trans_z, points_number_, final_transformation_);

  int *valid_distance;
  checkCudaErrors(cudaMalloc(&valid_distance, sizeof(int) * points_number_));

  double *min_distance;
  checkCudaErrors(cudaMalloc(&min_distance, sizeof(double) * points_number_));

  voxel_grid_.nearestNeighborSearch(trans_x, trans_y, trans_z, points_number_, valid_distance, min_distance, max_range);

  int size = points_number_;
  int half_size;

  while (size > 1) {
    half_size = (size - 1) / 2 + 1;

    int block_x = (half_size > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_size;
    int grid_x = (half_size - 1) / block_x + 1;

    gpuSum<double><<<grid_x, block_x>>>(min_distance, size, half_size);
    checkCudaErrors(cudaGetLastError());

    gpuSum<int><<<grid_x, block_x>>>(valid_distance, size, half_size);
    checkCudaErrors(cudaGetLastError());

    size = half_size;
  }

  checkCudaErrors(cudaDeviceSynchronize());

  int nr;

  checkCudaErrors(cudaMemcpy(&nr, valid_distance, sizeof(int), cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(&fitness_score, min_distance, sizeof(double), cudaMemcpyDeviceToHost));

  checkCudaErrors(cudaFree(trans_x));
  checkCudaErrors(cudaFree(trans_y));
  checkCudaErrors(cudaFree(trans_z));

  checkCudaErrors(cudaFree(valid_distance));
  checkCudaErrors(cudaFree(min_distance));

  if (nr > 0)
    return (fitness_score / nr);

  return DBL_MAX;
}
}
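/*
 * Reference note for the trial value selection cases above: the cubic and quadratic
 * minimizers cited there as Equations 2.4.52, 2.4.5 and 2.4.56 [Sun, Yuan 2006] are,
 * transcribed from the expressions already used in the code (LaTeX notation):
 *
 *   z   = 3 \frac{f_t - f_l}{a_t - a_l} - g_t - g_l,   w = \sqrt{z^2 - g_t g_l}
 *   a_c = a_l + (a_t - a_l) \frac{w - g_l - z}{g_t - g_l + 2w}    (cubic minimizer)
 *   a_s = a_l - \frac{a_l - a_t}{g_l - g_t} g_l                   (quadratic minimizer)
 *
 * Case 4 uses the same cubic form with (a_u, f_u, g_u) in place of (a_l, f_l, g_l).
 * These are transcriptions for readability only; the authoritative expressions are
 * the ones in the code.
 */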
be9dd68c30a9d6ae55682f8d69cb79624eec9354.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define NUM 1024

__shared__ int v[NUM];

__global__ void deadlock() {
    if (threadIdx.x % 2 == 0) {
        v[threadIdx.x]++;
        __syncthreads();
    } else {
        v[threadIdx.x]--;
        //__syncthreads(); // remove this one to incur a barrier mismatch
    }
}

int main() {
    hipLaunchKernelGGL(deadlock, dim3(1), dim3(NUM), 0, 0);
    return 0;
}
be9dd68c30a9d6ae55682f8d69cb79624eec9354.cu
#include <stdio.h>

#define NUM 1024

__shared__ int v[NUM];

__global__ void deadlock() {
    if (threadIdx.x % 2 == 0) {
        v[threadIdx.x]++;
        __syncthreads();
    } else {
        v[threadIdx.x]--;
        //__syncthreads(); // remove this one to incur a barrier mismatch
    }
}

int main() {
    deadlock<<<1,NUM>>>();
    return 0;
}
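/*
 * The test pair above is deliberately broken: only the even-numbered threads reach
 * the __syncthreads() barrier, so the block hangs (barrier divergence). For contrast,
 * a minimal divergence-safe sketch follows; the kernel name deadlock_fixed and its
 * body are illustrative and not part of the test case. The sketch also declares the
 * __shared__ array inside the kernel, which is the usual place for shared-memory
 * declarations.
 */
#include <stdio.h>

#define NUM 1024

__global__ void deadlock_fixed()
{
    __shared__ int v[NUM];

    // Divergent work is fine; the barrier is hoisted out of the branch so that
    // every thread of the block executes the same __syncthreads() call.
    if (threadIdx.x % 2 == 0)
        v[threadIdx.x] = 1;
    else
        v[threadIdx.x] = -1;

    __syncthreads();

    if (threadIdx.x == 0)
        printf("v[0] + v[1] = %d\n", v[0] + v[1]);
}

int main()
{
    deadlock_fixed<<<1, NUM>>>();
    cudaDeviceSynchronize();   // wait for the kernel (and its printf) to finish
    return 0;
}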
34c917417f160708bdb16348f401b9f51efc4c81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _ZQ_CUDA_OPTICAL_FLOW_2D_UTILS_CU_ #define _ZQ_CUDA_OPTICAL_FLOW_2D_UTILS_CU_ #include "ZQ_CUDA_OpticalFlow2D_Utils.cuh" #include "ZQ_CUDA_ImageProcessing2D.cuh" #include "ZQ_CUDA_PoissonSolver2D.cuh" namespace ZQ_CUDA_OpticalFlow2D { /**************** Base Kernels **********************************/ __global__ void compute_psi_data_Kernel(float* psi_data, const float* imdx, const float* imdy, const float* imdt, const float* du, const float* dv, const float eps, const int width, const int height, const int nChannels) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; float value = 0; for(int i = 0;i < nChannels;i++) { float tmp = (imdt[offset*nChannels+i]+imdx[offset*nChannels+i]*du[offset]+imdy[offset*nChannels+i]*dv[offset]); value += tmp*tmp; } psi_data[offset] = 0.5/sqrt(value+eps); } __global__ void compute_psi_smooth_Kernel(float* psi_smooth, const float* u, const float* v, const float eps, const int width, const int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; float ux = (x < width-1) ? (u[offset+1]-u[offset]) : 0; float uy = (y < height-1) ? (u[offset+width]-u[offset]) : 0; float vx = (x < width-1) ? (v[offset+1]-v[offset]) : 0; float vy = (y < height-1) ? (v[offset+width]-v[offset]) : 0; psi_smooth[offset] = 0.5/sqrt(ux*ux+uy*uy+vx*vx+vy*vy+eps); } __global__ void compute_psi_u_v_Kernel(float* psi_u, float* psi_v, const float* u, const float* v, const float eps, const int width, const int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; psi_u[offset] = 0.5/sqrt(u[offset]*u[offset]+eps); psi_v[offset] = 0.5/sqrt(v[offset]*v[offset]+eps); } __global__ void compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel(float* imdxdx, float* imdxdy, float* imdydy, float* imdtdx, float* imdtdy, const float* imdx, const float* imdy, const float* imdt, const int width, const int height, const int nChannels) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; imdxdx[offset] = 0; imdxdy[offset] = 0; imdydy[offset] = 0; imdtdx[offset] = 0; imdtdy[offset] = 0; for(int c = 0; c < nChannels;c++) { imdxdx[offset] += imdx[offset*nChannels+c]*imdx[offset*nChannels+c]; imdxdy[offset] += imdx[offset*nChannels+c]*imdy[offset*nChannels+c]; imdydy[offset] += imdy[offset*nChannels+c]*imdy[offset*nChannels+c]; imdtdx[offset] += imdt[offset*nChannels+c]*imdx[offset*nChannels+c]; imdtdy[offset] += imdt[offset*nChannels+c]*imdy[offset*nChannels+c]; } imdxdx[offset] /= nChannels; imdxdy[offset] /= nChannels; imdydy[offset] /= nChannels; imdtdx[offset] /= nChannels; imdtdy[offset] /= nChannels; } __global__ void compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_withpsidata_Kernel(float* imdxdx, float* imdxdy, float* imdydy, float* imdtdx, float* imdtdy, const float* imdx, const float* imdy, const float* imdt, const float* psi_data, const int width, const int height, const int nChannels) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; imdxdx[offset] = 0; imdxdy[offset] = 0; 
imdydy[offset] = 0; imdtdx[offset] = 0; imdtdy[offset] = 0; for(int c = 0; c < nChannels;c++) { imdxdx[offset] += imdx[offset*nChannels+c]*imdx[offset*nChannels+c]; imdxdy[offset] += imdx[offset*nChannels+c]*imdy[offset*nChannels+c]; imdydy[offset] += imdy[offset*nChannels+c]*imdy[offset*nChannels+c]; imdtdx[offset] += imdt[offset*nChannels+c]*imdx[offset*nChannels+c]; imdtdy[offset] += imdt[offset*nChannels+c]*imdy[offset*nChannels+c]; } imdxdx[offset] *= psi_data[offset]/nChannels; imdxdy[offset] *= psi_data[offset]/nChannels; imdydy[offset] *= psi_data[offset]/nChannels; imdtdx[offset] *= psi_data[offset]/nChannels; imdtdy[offset] *= psi_data[offset]/nChannels; } __global__ void Laplacian_withpsismooth_Kernel(float* output, const float* input,const float* psi_smooth, const int width, const int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; float value = 0; float in_x,in_x_,in_y,in_y_; in_x = (x < width-1) ? (input[offset+1] - input[offset]) : 0 ; in_x_ = (x > 0) ? (input[offset] - input[offset-1]) : 0; value += (x > 0) ? (psi_smooth[offset]*in_x - psi_smooth[offset-1]*in_x_) : 0; in_y = (y < height-1) ? (input[offset+width] - input[offset]) : 0; in_y_ = (y > 0) ? (input[offset] - input[offset-width]) : 0; value += (y > 0) ? (psi_smooth[offset]*in_y - psi_smooth[offset-width]*in_y_) : 0; output[offset] = value; } __global__ void OpticalFlow_L2_RedBlack_Kernel(float* du, float* dv, const float* u, const float* v, const float* imdxdx, const float* imdxdy, const float* imdydy, const float* imdtdx, const float* imdtdy, const float* laplace_u, const float* laplace_v, const int width, const int height, const float alpha, const float beta, const float omega, const bool redKernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; if(rest == (redKernel ? 
1 : 0)) return; int i = y; int j = x; int offset = i * width + j; float sigma1 = 0, sigma2 = 0, coeff = 0; float _weight; if(j > 0) { _weight = 1; sigma1 += _weight*du[offset-1]; sigma2 += _weight*dv[offset-1]; coeff += _weight; } if(j < width-1) { _weight = 1; sigma1 += _weight*du[offset+1]; sigma2 += _weight*dv[offset+1]; coeff += _weight; } if(i > 0) { _weight = 1; sigma1 += _weight*du[offset-width]; sigma2 += _weight*dv[offset-width]; coeff += _weight; } if(i < height-1) { _weight = 1; sigma1 += _weight*du[offset+width]; sigma2 += _weight*dv[offset+width]; coeff += _weight; } sigma1 *= alpha; sigma2 *= alpha; coeff *= alpha; // compute u sigma1 += alpha*laplace_u[offset] - imdtdx[offset] - imdxdy[offset]*dv[offset] - beta*u[offset]; float coeff1 = coeff + imdxdx[offset] + beta; du[offset] = (1-omega)*du[offset] + omega/coeff1*sigma1; // compute v sigma2 += alpha*laplace_v[offset] - imdtdy[offset] - imdxdy[offset]*du[offset] - beta*v[offset]; float coeff2 = coeff + imdydy[offset] + beta; dv[offset] = (1-omega)*dv[offset] + omega/coeff2*sigma2; } __global__ void OpticalFlow_L1_RedBlack_Kernel(float* du, float* dv, const float* u, const float* v, const float* imdxdx, const float* imdxdy, const float* imdydy, const float* imdtdx, const float* imdtdy, const float* laplace_u, const float* laplace_v,const float* psi_smooth, const float* psi_u, const float* psi_v, const int width, const int height, const float alpha, const float beta, const float omega, const bool redKernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; if(rest == (redKernel ? 1 : 0)) return; int i = y; int j = x; int offset = i * width + j; float sigma1 = 0, sigma2 = 0, coeff = 0; float _weight; if(j > 0) { _weight = psi_smooth[offset-1]; sigma1 += _weight*du[offset-1]; sigma2 += _weight*dv[offset-1]; coeff += _weight; } if(j < width-1) { _weight = psi_smooth[offset]; sigma1 += _weight*du[offset+1]; sigma2 += _weight*dv[offset+1]; coeff += _weight; } if(i > 0) { _weight = psi_smooth[offset-width]; sigma1 += _weight*du[offset-width]; sigma2 += _weight*dv[offset-width]; coeff += _weight; } if(i < height-1) { _weight = psi_smooth[offset]; sigma1 += _weight*du[offset+width]; sigma2 += _weight*dv[offset+width]; coeff += _weight; } sigma1 *= alpha; sigma2 *= alpha; coeff *= alpha; // compute u sigma1 += alpha*laplace_u[offset] - imdtdx[offset] - imdxdy[offset]*dv[offset] - beta*psi_u[offset]*u[offset]; float coeff1 = coeff + imdxdx[offset] + beta*psi_u[offset]; du[offset] = (1-omega)*du[offset] + omega/coeff1*sigma1; // compute v sigma2 += alpha*laplace_v[offset] - imdtdy[offset] - imdxdy[offset]*du[offset] - beta*psi_v[offset]*v[offset]; float coeff2 = coeff + imdydy[offset] + beta*psi_v[offset]; dv[offset] = (1-omega)*dv[offset] + omega/coeff2*sigma2; } /******************************* for ADMM ********************************************/ __global__ void proximalF_RedBlack_Kernel(float* du, float* dv, const float* imdxdx, const float* imdxdy, const float* imdydy, const float* imdtdx, const float* imdtdy, const float* laplace_u, const float* laplace_v, const float* u, const float* z_u, const float* v, const float* z_v, const int width, const int height, const float alpha, const float beta, const float lambda, const float omega, const bool redKernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; if(rest == (redKernel 
? 1 : 0)) return; int i = y; int j = x; int offset = i * width + j; float sigma1 = 0, sigma2 = 0, coeff = 0; float _weight; if(j > 0) { _weight = 1; sigma1 += _weight*du[offset-1]; sigma2 += _weight*dv[offset-1]; coeff += _weight; } if(j < width-1) { _weight = 1; sigma1 += _weight*du[offset+1]; sigma2 += _weight*dv[offset+1]; coeff += _weight; } if(i > 0) { _weight = 1; sigma1 += _weight*du[offset-width]; sigma2 += _weight*dv[offset-width]; coeff += _weight; } if(i < height-1) { _weight = 1; sigma1 += _weight*du[offset+width]; sigma2 += _weight*dv[offset+width]; coeff += _weight; } sigma1 *= alpha; sigma2 *= alpha; coeff *= alpha; // compute u sigma1 += alpha*laplace_u[offset] - imdtdx[offset] - imdxdy[offset]*dv[offset] - beta*u[offset] - 0.5*lambda*(u[offset] - z_u[offset]); float coeff1 = coeff + imdxdx[offset] + beta + 0.5*lambda; du[offset] = (1-omega)*du[offset] + omega/coeff1*sigma1; // compute v sigma2 += alpha*laplace_v[offset] - imdtdy[offset] - imdxdy[offset]*du[offset] - beta*v[offset] - 0.5*lambda*(v[offset] - z_v[offset]); float coeff2 = coeff + imdydy[offset] + beta + 0.5*lambda; dv[offset] = (1-omega)*dv[offset] + omega/coeff2*sigma2; } __global__ void proximal_F2_Kernel(float* u, float* v, const float* z_u, const float* z_v, const float* warpU, const float* warpV, const int width, const int height, const float gama, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; u[offset] = (gama*warpU[offset]+0.5*lambda*z_u[offset])/(gama+0.5*lambda); v[offset] = (gama*warpV[offset]+0.5*lambda*z_v[offset])/(gama+0.5*lambda); } __global__ void compute_z_u_z_v_for_proximal_F1_Kernel(float* z_u, float* z_v, const float* u_for_F1, const float* v_for_F1, const float* u_for_q1, const float* v_for_q1, const int width, const int height, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; z_u[offset] = u_for_F1[offset] - 1.0/lambda*u_for_q1[offset]; z_v[offset] = v_for_F1[offset] - 1.0/lambda*v_for_q1[offset]; } __global__ void compute_z_u_z_v_for_proximal_F2_Kernel(float* z_u, float* z_v, const float* u_for_F2, const float* v_for_F2, const float* u_for_q2, const float* v_for_q2, const int width, const int height, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; z_u[offset] = u_for_F2[offset] - 1.0/lambda*u_for_q2[offset]; z_v[offset] = v_for_F2[offset] - 1.0/lambda*v_for_q2[offset]; } __global__ void compute_z_u_z_v_for_proximal_G_Kernel(float* z_u, float* z_v, const float* u_for_F1, const float* v_for_F1, const float* u_for_F2, const float* v_for_F2, const float* u_for_q1, const float* v_for_q1, const float* u_for_q2, const float* v_for_q2, const int width, const int height, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; z_u[offset] = 0.5*(u_for_F1[offset] + 1.0/lambda*u_for_q1[offset] + u_for_F2[offset] + 1.0/lambda*u_for_q2[offset]); z_v[offset] = 0.5*(v_for_F1[offset] + 1.0/lambda*v_for_q1[offset] + v_for_F2[offset] + 1.0/lambda*v_for_q2[offset]); } __global__ void update_u_v_for_q1_q2_Kernel(float* u_for_q1, float* v_for_q1, float* u_for_q2, float* v_for_q2, const float* u_for_F1, 
const float* v_for_F1, const float* u_for_F2, const float* v_for_F2, const float* u_for_G, const float* v_for_G, const int width, const int height, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; u_for_q1[offset] += lambda*(u_for_F1[offset] - u_for_G[offset]); v_for_q1[offset] += lambda*(v_for_F1[offset] - v_for_G[offset]); u_for_q2[offset] += lambda*(u_for_F2[offset] - u_for_G[offset]); v_for_q2[offset] += lambda*(v_for_F2[offset] - v_for_G[offset]); } /****************************************************************************************************/ void cu_Compute_z_u_z_v_for_proximal_F1(float* z_u, float* z_v, const float* u_for_F1, const float* v_for_F1, const float* u_for_q1, const float* v_for_q1, const int width, const int height, const float lambda) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( compute_z_u_z_v_for_proximal_F1_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_u,z_v,u_for_F1,v_for_F1,u_for_q1,v_for_q1,width,height,lambda); } void cu_Compute_z_u_z_v_for_proximal_F2(float* z_u, float* z_v, const float* u_for_F2, const float* v_for_F2, const float* u_for_q2, const float* v_for_q2, const int width, const int height, const float lambda) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( compute_z_u_z_v_for_proximal_F2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_u,z_v,u_for_F2,v_for_F2,u_for_q2,v_for_q2,width,height,lambda); } void cu_Compute_z_u_z_v_for_proximal_G(float* z_u, float* z_v, const float* u_for_F1, const float* v_for_F1, const float* u_for_F2, const float* v_for_F2, const float* u_for_q1, const float* v_for_q1, const float* u_for_q2, const float* v_for_q2, const int width, const int height, const float lambda) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( compute_z_u_z_v_for_proximal_G_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2, u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,lambda); } void cu_Update_u_v_for_q1_q2(float* u_for_q1, float* v_for_q1, float* u_for_q2, float* v_for_q2, const float* u_for_F1, const float* v_for_F1, const float* u_for_F2, const float* v_for_F2, const float* u_for_G, const float* v_for_G, const int width, const int height, const float lambda) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( update_u_v_for_q1_q2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u_for_q1,v_for_q1,u_for_q2,v_for_q2, u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,lambda); } void cu_GetDerivatives(float* imdx, float* imdy, float* imdt, const float* Im1, const float* Im2, const int width, const int height, const int nChannels) { float* tmpBuf = 0; float* im1 = 0; float* im2 = 0; checkCudaErrors( hipMalloc((void**)&tmpBuf, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&im1, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&im2, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(tmpBuf,0,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(im1,0,sizeof(float)*width*height*nChannels) 
); checkCudaErrors( hipMemset(im2,0,sizeof(float)*width*height*nChannels) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Imfilter_h_Gaussian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, tmpBuf,Im1,width,height,nChannels); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Imfilter_v_Gaussian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, im1,tmpBuf,width,height,nChannels); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Imfilter_h_Gaussian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, tmpBuf,Im2,width,height,nChannels); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Imfilter_v_Gaussian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, im2,tmpBuf,width,height,nChannels); /* tmpBuf = im1*0.4 + im2*0.6 */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, tmpBuf,im1,0.4,im2,0.6,width,height,nChannels); /* imdx = \partial_x {tmpBuf} */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Derivative_x_Advanced_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdx,tmpBuf,width,height,nChannels); /* imdy = \partial_y {tmpBuf} */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Derivative_y_Advanced_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdy,tmpBuf,width,height,nChannels); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdt,im2,1,im1,-1,width,height,nChannels); checkCudaErrors( hipFree(tmpBuf) ); checkCudaErrors( hipFree(im1) ); checkCudaErrors( hipFree(im2) ); tmpBuf = 0; im1 = 0; im2 = 0; } /*alpha: penality for velocity gradient*/ void cu_OpticalFlow_L2(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nSORIter) { float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; checkCudaErrors( hipMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( 
hipMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdy, 0, sizeof(float)*width*height) ); int nOuterFPIterations = nOuterFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ hipLaunchKernelGGL(( compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,width,height,nChannels); /* laplace u, v */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_u,u,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_v,v,width,height,1); // set omega float omega = 1.0f; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { hipLaunchKernelGGL(( OpticalFlow_L2_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,true); hipLaunchKernelGGL(( OpticalFlow_L2_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,false); } /* red - black solver end */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,du,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); checkCudaErrors( hipFree(du) ); checkCudaErrors( hipFree(dv) ); checkCudaErrors( hipFree(laplace_u) ); checkCudaErrors( hipFree(laplace_v) ); checkCudaErrors( hipFree(imdx) ); checkCudaErrors( hipFree(imdy) ); checkCudaErrors( hipFree(imdt) ); checkCudaErrors( hipFree(imdxdx) ); checkCudaErrors( hipFree(imdxdy) ); checkCudaErrors( hipFree(imdydy) ); checkCudaErrors( hipFree(imdtdx) ); checkCudaErrors( hipFree(imdtdy) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; } /*alpha: penality for velocity gradient*/ void cu_OpticalFlow_L2_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const int width, const int height, const int nChannels, const 
float alpha, const float beta, const int nOuterFPIter, const int nSORIter) { float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; checkCudaErrors( hipMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdy, 0, sizeof(float)*width*height) ); int nOuterFPIterations = nOuterFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Occupy_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,occupy,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ hipLaunchKernelGGL(( compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,width,height,nChannels); /* laplace u, v */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_u,u,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_v,v,width,height,1); // set omega float omega = 1.0f; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { hipLaunchKernelGGL(( 
OpticalFlow_L2_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,true); hipLaunchKernelGGL(( OpticalFlow_L2_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,false); } /* red - black solver end */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,du,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Occupy_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,occupy,u,v,width,height,nChannels); checkCudaErrors( hipFree(du) ); checkCudaErrors( hipFree(dv) ); checkCudaErrors( hipFree(laplace_u) ); checkCudaErrors( hipFree(laplace_v) ); checkCudaErrors( hipFree(imdx) ); checkCudaErrors( hipFree(imdy) ); checkCudaErrors( hipFree(imdt) ); checkCudaErrors( hipFree(imdxdx) ); checkCudaErrors( hipFree(imdxdy) ); checkCudaErrors( hipFree(imdydy) ); checkCudaErrors( hipFree(imdtdx) ); checkCudaErrors( hipFree(imdtdy) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; } /*alpha: penality for velocity gradient*/ void cu_OpticalFlow_L1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nInnerFPIter,const int nSORIter) { float eps = optical_flow_L1_eps; float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; float* psi_data = 0; float* psi_smooth = 0; float* psi_u = 0; float* psi_v = 0; checkCudaErrors( hipMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&psi_data,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&psi_smooth,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&psi_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&psi_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( 
hipMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(psi_data,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(psi_smooth,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(psi_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(psi_v,0,sizeof(float)*width*height) ); int nOuterFPIterations = nOuterFPIter; int nInnerFPIterations = nInnerFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); for(int inner_it = 0; inner_it < nInnerFPIterations;inner_it++) { /* compute psi_data*/ hipLaunchKernelGGL(( compute_psi_data_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, psi_data,imdx,imdy,imdt,du,dv,eps,width,height,nChannels); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ hipLaunchKernelGGL(( compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_withpsidata_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,psi_data,width,height,nChannels); /*compute psi_smooth*/ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,du,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v,dv,1,width,height,1); hipLaunchKernelGGL(( compute_psi_smooth_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, psi_smooth,u,v,eps,width,height); hipLaunchKernelGGL(( compute_psi_u_v_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, psi_u,psi_v,u,v,eps,width,height); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,du,-1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v,dv,-1,width,height,1); /* laplace u, v with psi_smooth */ hipLaunchKernelGGL(( Laplacian_withpsismooth_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_u,u,psi_smooth,width,height); hipLaunchKernelGGL(( Laplacian_withpsismooth_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_v,v,psi_smooth,width,height); // set omega float omega = 1.0f; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { hipLaunchKernelGGL(( OpticalFlow_L1_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,psi_smooth, 
psi_u,psi_v,width,height,alpha,beta,omega,true); hipLaunchKernelGGL(( OpticalFlow_L1_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,psi_smooth, psi_u,psi_v,width,height,alpha,beta,omega,false); } /* red - black solver end */ } hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,du,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); checkCudaErrors( hipFree(du) ); checkCudaErrors( hipFree(dv) ); checkCudaErrors( hipFree(laplace_u) ); checkCudaErrors( hipFree(laplace_v) ); checkCudaErrors( hipFree(imdx) ); checkCudaErrors( hipFree(imdy) ); checkCudaErrors( hipFree(imdt) ); checkCudaErrors( hipFree(imdxdx) ); checkCudaErrors( hipFree(imdxdy) ); checkCudaErrors( hipFree(imdydy) ); checkCudaErrors( hipFree(imdtdx) ); checkCudaErrors( hipFree(imdtdy) ); checkCudaErrors( hipFree(psi_data) ); checkCudaErrors( hipFree(psi_smooth) ); checkCudaErrors( hipFree(psi_u) ); checkCudaErrors( hipFree(psi_v) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; psi_data = 0; psi_smooth = 0; psi_u = 0; psi_v = 0; } void cu_OpticalFlow_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter) { float eps = optical_flow_L1_eps; float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; float* psi_data = 0; checkCudaErrors( hipMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&psi_data,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdt, 0, 
sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(psi_data,0,sizeof(float)*width*height) ); int nOuterFPIterations = nOuterFPIter; int nInnerFPIterations = nInnerFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); for(int inner_it = 0; inner_it < nInnerFPIterations;inner_it++) { /* compute psi_data*/ hipLaunchKernelGGL(( compute_psi_data_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, psi_data,imdx,imdy,imdt,du,dv,eps,width,height,nChannels); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ hipLaunchKernelGGL(( compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_withpsidata_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,psi_data, width,height,nChannels); /* laplace u, v with psi_smooth */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_u,u,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_v,v,width,height,1); // set omega float omega = 1.0; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { hipLaunchKernelGGL(( OpticalFlow_L2_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,true); hipLaunchKernelGGL(( OpticalFlow_L2_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,false); } /* red - black solver end */ } hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,du,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); checkCudaErrors( hipFree(du) ); checkCudaErrors( hipFree(dv) ); checkCudaErrors( hipFree(laplace_u) ); checkCudaErrors( hipFree(laplace_v) ); checkCudaErrors( hipFree(imdx) ); checkCudaErrors( hipFree(imdy) ); checkCudaErrors( hipFree(imdt) ); checkCudaErrors( hipFree(imdxdx) ); checkCudaErrors( hipFree(imdxdy) ); checkCudaErrors( hipFree(imdydy) ); checkCudaErrors( hipFree(imdtdx) ); checkCudaErrors( hipFree(imdtdy) ); 
checkCudaErrors( hipFree(psi_data) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; psi_data = 0; } void cu_Proximal_F1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* z_u, const float* z_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int nOuterFPIter, const int nSORIter) { float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; checkCudaErrors( hipMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdy, 0, sizeof(float)*width*height) ); /* ProximalF(z_u,z_v,\lambda) = minimize_{u,v} \int {|I_2(x+u,y+v)-I_1(x,y)|^2} + \alpha^2 \int {|\nabla u|^2 + |\nabla v|^2} + \beta^2 \int {|u|^2 + |v|^2} + \lambda \int {|u-z_u|^2 + |v-z_v|^2} * * The Euler-Lagrange equation is: * I_t I_x + \beta^2 u + \lambda(u-z_u) = \alpha^2 \Delta u * I_t I_y + \beta^2 v + \lambda(v-z_v) = \alpha^2 \Delta v */ int nOuterFPIterations = nOuterFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, 
sizeof(float)*width*height) ); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ hipLaunchKernelGGL(( compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,width,height,nChannels); /* laplace u, v */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_u,u,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_v,v,width,height,1); // set omega float omega = 1.0; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { hipLaunchKernelGGL(( proximalF_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,true); hipLaunchKernelGGL(( proximalF_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,false); } /* red - black solver end */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,du,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); checkCudaErrors( hipFree(du) ); checkCudaErrors( hipFree(dv) ); checkCudaErrors( hipFree(laplace_u) ); checkCudaErrors( hipFree(laplace_v) ); checkCudaErrors( hipFree(imdx) ); checkCudaErrors( hipFree(imdy) ); checkCudaErrors( hipFree(imdt) ); checkCudaErrors( hipFree(imdxdx) ); checkCudaErrors( hipFree(imdxdy) ); checkCudaErrors( hipFree(imdydy) ); checkCudaErrors( hipFree(imdtdx) ); checkCudaErrors( hipFree(imdtdy) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; } void cu_Proximal_F1_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* z_u, const float* z_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int nOuterFPIter, const int nSORIter) { float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; checkCudaErrors( hipMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( 
hipMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdy, 0, sizeof(float)*width*height) ); /* ProximalF(z_u,z_v,\lambda) = minimize_{u,v} \int {|I_2(x+u,y+v)-I_1(x,y)|^2} + \alpha^2 \int {|\nabla u|^2 + |\nabla v|^2} + \beta^2 \int {|u|^2 + |v|^2} + \lambda \int {|u-z_u|^2 + |v-z_v|^2} * * The Euler-Lagrange equation is: * I_t I_x + \beta^2 u + \lambda(u-z_u) = \alpha^2 \Delta u * I_t I_y + \beta^2 v + \lambda(v-z_v) = \alpha^2 \Delta v */ int nOuterFPIterations = nOuterFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Occupy_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,occupy,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ hipLaunchKernelGGL(( compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,width,height,nChannels); /* laplace u, v */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_u,u,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_v,v,width,height,1); // set omega float omega = 1.0; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { hipLaunchKernelGGL(( proximalF_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,true); hipLaunchKernelGGL(( proximalF_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,false); } /* red - black solver end */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,du,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp 
image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Occupy_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,occupy,u,v,width,height,nChannels); checkCudaErrors( hipFree(du) ); checkCudaErrors( hipFree(dv) ); checkCudaErrors( hipFree(laplace_u) ); checkCudaErrors( hipFree(laplace_v) ); checkCudaErrors( hipFree(imdx) ); checkCudaErrors( hipFree(imdy) ); checkCudaErrors( hipFree(imdt) ); checkCudaErrors( hipFree(imdxdx) ); checkCudaErrors( hipFree(imdxdy) ); checkCudaErrors( hipFree(imdydy) ); checkCudaErrors( hipFree(imdtdx) ); checkCudaErrors( hipFree(imdtdy) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; } void cu_Proximal_F1_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* z_u, const float* z_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter) { float eps = optical_flow_L1_eps; float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; float* psi_data = 0; checkCudaErrors( hipMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&psi_data, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(imdtdy, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(psi_data,0, sizeof(float)*width*height) ); /* ProximalF(z_u,z_v,\lambda) = minimize_{u,v} \int {|I_2(x+u,y+v)-I_1(x,y)|^2} + \alpha^2 \int {|\nabla u|^2 + |\nabla v|^2} + \beta^2 \int {|u|^2 + |v|^2} + \lambda \int {|u-z_u|^2 + |v-z_v|^2} * * The Euler-Lagrange equation is: * I_t I_x + \beta^2 u + \lambda(u-z_u) = \alpha^2 \Delta u * 
I_t I_y + \beta^2 v + \lambda(v-z_v) = \alpha^2 \Delta v */ int nOuterFPIterations = nOuterFPIter; int nInnerFPIterations = nInnerFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( hipMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( hipMemset(dv, 0, sizeof(float)*width*height) ); for(int inner_it = 0; inner_it < nInnerFPIterations; inner_it++) { /*compute psi_data*/ hipLaunchKernelGGL(( compute_psi_data_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, psi_data,imdx,imdy,imdt,du,dv,eps,width,height,nChannels); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ hipLaunchKernelGGL(( compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_withpsidata_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,psi_data, width,height,nChannels); /* laplace u, v */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_u,u,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, laplace_v,v,width,height,1); // set omega float omega = 1.0; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { hipLaunchKernelGGL(( proximalF_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,true); hipLaunchKernelGGL(( proximalF_RedBlack_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,false); } /* red - black solver end */ } hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,du,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpIm2,Im1,Im2,u,v,width,height,nChannels); checkCudaErrors( hipFree(du) ); checkCudaErrors( hipFree(dv) ); checkCudaErrors( hipFree(laplace_u) ); checkCudaErrors( hipFree(laplace_v) ); checkCudaErrors( hipFree(imdx) ); checkCudaErrors( hipFree(imdy) ); checkCudaErrors( hipFree(imdt) ); checkCudaErrors( hipFree(imdxdx) ); checkCudaErrors( hipFree(imdxdy) ); checkCudaErrors( hipFree(imdydy) ); checkCudaErrors( hipFree(imdtdx) ); checkCudaErrors( hipFree(imdtdy) ); checkCudaErrors( hipFree(psi_data) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; psi_data = 0; } void cu_Proximal_F2_first(float* u, float* v, const float* z_u, const float* z_v, const float* next_u, const float* next_v, const int width, const int height, const float gama, const float lambda, const int 
nFPIter, const int nPoissonIter) { int nOuterFPIterations = nFPIter; int nPoissonIterations = nPoissonIter; float* warpU = 0; float* warpV = 0; checkCudaErrors( hipMalloc((void**)&warpU,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpV,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(warpU,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(warpV,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int out_it = 0;out_it < nOuterFPIterations;out_it++) { hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpU,u,next_u,u,v,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpV,v,next_v,u,v,width,height,1); ZQ_CUDA_PoissonSolver2D::cu_SolveOpenPoissonRedBlack_Regular(warpU,warpV,width,height,nPoissonIterations); hipLaunchKernelGGL(( proximal_F2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,v,z_u,z_v,warpU,warpV,width,height,gama,lambda); } checkCudaErrors( hipFree(warpU) ); checkCudaErrors( hipFree(warpV) ); warpU = 0; warpV = 0; } void cu_Proximal_F2_middle(float* u, float* v, const float* z_u, const float* z_v, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const float gama, const float lambda, const int nFPIter, const int nPoissonIter) { int nOuterFPIterations = nFPIter; int nPoissonIterations = nPoissonIter; float* warpU_pre = 0; float* warpV_pre = 0; float* warpU_nex = 0; float* warpV_nex = 0; float* tmp_u = 0; float* tmp_v = 0; checkCudaErrors( hipMalloc((void**)&warpU_pre,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpV_pre,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpU_nex,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpV_nex,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&tmp_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&tmp_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(warpU_pre,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(warpV_pre,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(warpU_nex,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(warpV_nex,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int out_it = 0;out_it < nOuterFPIterations;out_it++) { hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpU_nex,u,next_u,u,v,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpV_nex,v,next_v,u,v,width,height,1); checkCudaErrors( hipMemset(tmp_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(tmp_v,0,sizeof(float)*width*height) ); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, tmp_u,u,-1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, tmp_v,v,-1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpU_pre,u,pre_u,tmp_u,tmp_v,width,height,1); hipLaunchKernelGGL(( 
ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpV_pre,v,pre_v,tmp_u,tmp_v,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, tmp_u,warpU_pre,0.5,warpU_nex,0.5,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, tmp_v,warpV_pre,0.5,warpV_nex,0.5,width,height,1); ZQ_CUDA_PoissonSolver2D::cu_SolveOpenPoissonRedBlack_Regular(tmp_u,tmp_v,width,height,nPoissonIterations); hipLaunchKernelGGL(( proximal_F2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,v,z_u,z_v,tmp_u,tmp_v,width,height,2*gama,lambda); } checkCudaErrors( hipFree(warpU_pre) ); checkCudaErrors( hipFree(warpV_pre) ); checkCudaErrors( hipFree(warpU_nex) ); checkCudaErrors( hipFree(warpV_nex) ); checkCudaErrors( hipFree(tmp_u) ); checkCudaErrors( hipFree(tmp_v) ); warpU_pre = 0; warpV_pre = 0; warpU_nex = 0; warpV_nex = 0; tmp_u = 0; tmp_v = 0; } void cu_Proximal_F2_last(float* u, float* v, const float* z_u, const float* z_v, const float* pre_u, const float* pre_v, const int width, const int height, const float gama, const float lambda, const int nFPIter, const int nPoissonIter) { int nOuterFPIterations = nFPIter; int nPoissonIterations = nPoissonIter; float* warpU = 0; float* warpV = 0; float* tmp_u = 0; float* tmp_v = 0; checkCudaErrors( hipMalloc((void**)&warpU,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpV,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&tmp_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&tmp_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(warpU,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(warpV,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int out_it = 0;out_it < nOuterFPIterations;out_it++) { checkCudaErrors( hipMemset(tmp_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(tmp_v,0,sizeof(float)*width*height) ); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, tmp_u,u,-1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, tmp_v,v,-1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpU,u,pre_u,tmp_u,tmp_v,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, warpV,v,pre_v,tmp_u,tmp_v,width,height,1); ZQ_CUDA_PoissonSolver2D::cu_SolveOpenPoissonRedBlack_Regular(warpU,warpV,width,height,nPoissonIterations); hipLaunchKernelGGL(( proximal_F2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u,v,z_u,z_v,warpU,warpV,width,height,gama,lambda); } checkCudaErrors( hipFree(warpU) ); checkCudaErrors( hipFree(warpV) ); checkCudaErrors( hipFree(tmp_u) ); checkCudaErrors( hipFree(tmp_v) ); warpU = 0; warpV = 0; tmp_u = 0; tmp_v = 0; } void cu_Proximal_G(float* u, float* v, const float* z_u, const float* z_v, const int width, const int height, const int nPoissonIter) { checkCudaErrors( hipMemcpy(u,z_u,sizeof(float)*width*height, hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v,z_v,sizeof(float)*width*height, hipMemcpyDeviceToDevice) ); int nPoissonIterations = nPoissonIter; 
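	/* A brief sketch of the projection this proximal step is assumed to perform (inferred from the
	   solver name, not stated in this file): treating (u,v) as a 2D vector field, the red-black
	   Poisson solve below approximately removes its divergent part,
	       \Delta p = \partial_x u + \partial_y v,   u <- u - \partial_x p,   v <- v - \partial_y p,
	   so Proximal_G(z_u,z_v) copies z and then projects it onto (approximately) divergence-free
	   flow fields. The "open" boundary handling is an assumption about
	   cu_SolveOpenPoissonRedBlack_Regular, which lives in ZQ_CUDA_PoissonSolver2D. */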
ZQ_CUDA_PoissonSolver2D::cu_SolveOpenPoissonRedBlack_Regular(u,v,width,height,nPoissonIterations); } void cu_OpticalFlow_ADMM(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nPoissonIter) { float* u_for_F = u; float* v_for_F = v; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q = 0; float* v_for_q = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int it = 0;it < ADMMIter;it++) { hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_u,u_for_G,1,u_for_q,-1.0,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_v,v_for_G,1,v_for_q,-1.0,width,height,1); cu_Proximal_F1(u_for_F,v_for_F,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_u,u_for_F,1,u_for_q,1.0,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_v,v_for_F,1,v_for_q,1.0,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u_for_q,u_for_F,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u_for_q,u_for_G,-1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v_for_q,v_for_F,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v_for_q,v_for_G,-1,width,height,1); } checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q) ); checkCudaErrors( hipFree(v_for_q) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F = 0; v_for_F = 0; u_for_G = 0; v_for_G = 0; u_for_q = 0; v_for_q = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const int width, const int height, const int nChannels, const float alpha, const float beta, 
const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nPoissonIter) { float* u_for_F = u; float* v_for_F = v; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q = 0; float* v_for_q = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int it = 0;it < ADMMIter;it++) { hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_u,u_for_G,1,u_for_q,-1.0,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_v,v_for_G,1,v_for_q,-1.0,width,height,1); cu_Proximal_F1_Occupy(u_for_F,v_for_F,warpIm2,Im1,Im2,occupy,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_u,u_for_F,1,u_for_q,1.0,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_v,v_for_F,1,v_for_q,1.0,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u_for_q,u_for_F,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u_for_q,u_for_G,-1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v_for_q,v_for_F,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v_for_q,v_for_G,-1,width,height,1); } checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q) ); checkCudaErrors( hipFree(v_for_q) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F = 0; v_for_F = 0; u_for_G = 0; v_for_G = 0; u_for_q = 0; v_for_q = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nPoissonIter) { float* u_for_F = u; float* v_for_F = v; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q = 0; float* v_for_q = 0; float* z_u = 0; float* z_v = 0; 
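	/* Reading of the ADMM splitting used by the cu_OpticalFlow_ADMM* drivers (the variable roles are
	   inferred from the loop below; they are not documented in the original code):
	       minimize F1(u,v) + G(u,v), with a data-term copy x_F = (u_for_F,v_for_F), a projection copy
	       x_G = (u_for_G,v_for_G) and a scaled dual variable q = (u_for_q,v_for_q):
	           z   = x_G - q          (Add_Im1_weight1_Im2_weight2 with weights  1, -1)
	           x_F = prox_F1(z)       (cu_Proximal_F1* : SOR solve of the flow energy)
	           z   = x_F + q          (weights 1, +1)
	           x_G = prox_G(z)        (cu_Proximal_G   : Poisson projection)
	           q   = q + x_F - x_G    (the two Addwith calls per component)
	   lambda acts as the penalty weight coupling the two copies inside the proximal solves. */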
checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int it = 0;it < ADMMIter;it++) { hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_u,u_for_G,1,u_for_q,-1.0,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_v,v_for_G,1,v_for_q,-1.0,width,height,1); cu_Proximal_F1_DL1(u_for_F,v_for_F,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nInnerFPIter,nSORIter); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_u,u_for_F,1,u_for_q,1.0,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, z_v,v_for_F,1,v_for_q,1.0,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u_for_q,u_for_F,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, u_for_q,u_for_G,-1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v_for_q,v_for_F,1,width,height,1); hipLaunchKernelGGL(( ZQ_CUDA_ImageProcessing2D::Addwith_Kernel), dim3(gridSize),dim3(blockSize), 0, 0, v_for_q,v_for_G,-1,width,height,1); } checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q) ); checkCudaErrors( hipFree(v_for_q) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F = 0; v_for_F = 0; u_for_G = 0; v_for_G = 0; u_for_q = 0; v_for_q = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_First(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( 
hipMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_first(u_for_F2,v_for_F2,z_u,z_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( hipFree(u_for_F2) ); checkCudaErrors( hipFree(v_for_F2) ); checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q1) ); checkCudaErrors( hipFree(v_for_q1) ); checkCudaErrors( hipFree(u_for_q2) ); checkCudaErrors( hipFree(v_for_q2) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_First_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); 
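	/* The *_First / *_Middle / *_Last drivers (including this _Occupy variant) extend the same ADMM
	   idea to three terms; the summary below is my reading of the loop and of the helper names, so the
	   exact formulas inside cu_Compute_z_u_z_v_* and cu_Update_u_v_for_q1_q2 should be checked at
	   their definitions elsewhere in this file:
	       F1 : per-frame optical-flow energy            -> cu_Proximal_F1*,  copy (u_for_F1,v_for_F1), dual (u_for_q1,v_for_q1)
	       F2 : temporal coherence with pre_/next_ flow  -> cu_Proximal_F2_*, copy (u_for_F2,v_for_F2), dual (u_for_q2,v_for_q2)
	       G  : Poisson (divergence-free) projection     -> cu_Proximal_G,    consensus copy (u_for_G,v_for_G)
	   Each pass builds z for F1 and F2 from the consensus copy minus the matching dual, builds z for G
	   from the two term copies plus their duals, and then updates the duals roughly as q_i += x_i - x_G.
	   new_gamma = gamma*alpha*alpha appears to rescale the temporal weight so that it is comparable to
	   the alpha^2 smoothness weight used inside the F1 solve. */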
checkCudaErrors( hipMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_Occupy(u_for_F1,v_for_F1,warpIm2,Im1,Im2,occupy,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_first(u_for_F2,v_for_F2,z_u,z_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( hipFree(u_for_F2) ); checkCudaErrors( hipFree(v_for_F2) ); checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q1) ); checkCudaErrors( hipFree(v_for_q1) ); checkCudaErrors( hipFree(u_for_q2) ); checkCudaErrors( hipFree(v_for_q2) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_DL1_First(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( 
hipMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_DL1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nInnerFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_first(u_for_F2,v_for_F2,z_u,z_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( hipFree(u_for_F2) ); checkCudaErrors( hipFree(v_for_F2) ); checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q1) ); checkCudaErrors( hipFree(v_for_q1) ); checkCudaErrors( hipFree(u_for_q2) ); checkCudaErrors( hipFree(v_for_q2) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Middle(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* 
z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_middle(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( hipFree(u_for_F2) ); checkCudaErrors( hipFree(v_for_F2) ); checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q1) ); checkCudaErrors( hipFree(v_for_q1) ); checkCudaErrors( hipFree(u_for_q2) ); checkCudaErrors( hipFree(v_for_q2) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Middle_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* 
v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_Occupy(u_for_F1,v_for_F1,warpIm2,Im1,Im2,occupy,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_middle(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( hipFree(u_for_F2) ); checkCudaErrors( hipFree(v_for_F2) ); checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q1) ); checkCudaErrors( hipFree(v_for_q1) ); checkCudaErrors( hipFree(u_for_q2) ); checkCudaErrors( hipFree(v_for_q2) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_DL1_Middle(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* 
v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_DL1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nInnerFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_middle(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( hipFree(u_for_F2) ); checkCudaErrors( hipFree(v_for_F2) ); checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q1) ); checkCudaErrors( hipFree(v_for_q1) ); checkCudaErrors( hipFree(u_for_q2) ); checkCudaErrors( hipFree(v_for_q2) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Last(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 
0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_last(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( hipFree(u_for_F2) ); checkCudaErrors( hipFree(v_for_F2) ); checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q1) ); checkCudaErrors( hipFree(v_for_q1) ); checkCudaErrors( hipFree(u_for_q2) ); checkCudaErrors( hipFree(v_for_q2) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Last_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* 
u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_Occupy(u_for_F1,v_for_F1,warpIm2,Im1,Im2,occupy,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_last(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( hipFree(u_for_F2) ); checkCudaErrors( hipFree(v_for_F2) ); checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q1) ); checkCudaErrors( hipFree(v_for_q1) ); checkCudaErrors( hipFree(u_for_q2) ); checkCudaErrors( hipFree(v_for_q2) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_DL1_Last(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* 
v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( hipMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,hipMemcpyDeviceToDevice) ); checkCudaErrors( hipMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( hipMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_DL1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nInnerFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_last(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( hipFree(u_for_F2) ); checkCudaErrors( hipFree(v_for_F2) ); checkCudaErrors( hipFree(u_for_G) ); checkCudaErrors( hipFree(v_for_G) ); checkCudaErrors( hipFree(u_for_q1) ); checkCudaErrors( hipFree(v_for_q1) ); checkCudaErrors( hipFree(u_for_q2) ); checkCudaErrors( hipFree(v_for_q2) ); checkCudaErrors( hipFree(z_u) ); checkCudaErrors( hipFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } /***********************************************************************/ extern "C" void InitDevice2D(const int deviceid) { int num_devices = 0; checkCudaErrors(hipGetDeviceCount(&num_devices)); int cur_device = deviceid; if(deviceid < 0 || deviceid >= num_devices) { cur_device = 0; hipDeviceProp_t properties; hipGetDeviceProperties(&properties, cur_device); printf("use the Device ID:\t%d\n", cur_device); 
printf("Device Name is used:\t%s\n", properties.name ); } checkCudaErrors(hipSetDevice(cur_device)); } extern "C" float OpticalFlow2D_L2(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nSORIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); cu_OpticalFlow_L2(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,nOuterFPIter,nSORIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels,hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(warpIm2_d) ); hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_L2_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nSORIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(occupy_d,occupy,sizeof(float)*width*height,hipMemcpyHostToDevice) ); 
checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); cu_OpticalFlow_L2_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,width,height,nChannels,alpha,beta,nOuterFPIter,nSORIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels,hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(occupy_d) ); checkCudaErrors( hipFree(warpIm2_d) ); hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_L1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nInnerFPIter,const int nSORIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); cu_OpticalFlow_L1(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,nOuterFPIter,nInnerFPIter,nSORIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels,hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( 
hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels,hipMemcpyHostToDevice) ); cu_OpticalFlow_DL1(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,nOuterFPIter,nInnerFPIter,nSORIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height,hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels,hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,lambda,ADMMIter,nOuterFPIter,nSORIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(warpIm2_d) ); u_d = 0; v_d = 0; 
Im1_d = 0; Im2_d = 0; warpIm2_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(occupy_d,occupy,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,width,height,nChannels,alpha,beta,lambda,ADMMIter,nOuterFPIter,nSORIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(occupy_d) ); checkCudaErrors( hipFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; occupy_d = 0; warpIm2_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( 
hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_DL1(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,lambda,ADMMIter,nOuterFPIter,nInnerFPIter,nSORIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_First(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_u_d,next_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_v_d,next_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_First(u_d,v_d,warpIm2_d,Im1_d,Im2_d,next_u_d,next_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, 
hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(next_u_d) ); checkCudaErrors( hipFree(next_v_d) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_First_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(occupy_d,occupy,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_u_d,next_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_v_d,next_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_First_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,next_u_d,next_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(next_u_d) ); checkCudaErrors( hipFree(next_v_d) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(occupy_d) ); checkCudaErrors( hipFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; occupy_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d 
= 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_DL1_First(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_u_d,next_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_v_d,next_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_DL1_First(u_d,v_d,warpIm2_d,Im1_d,Im2_d,next_u_d,next_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nInnerFPIter, nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(next_u_d) ); checkCudaErrors( hipFree(next_v_d) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Middle(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* Im1_d = 0; 
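/* pre_u/pre_v and next_u/next_v are the flows of the neighbouring frame pairs; judging by proximal_F2_Kernel, gamma weights how strongly the current flow is pulled towards them in the ADMM F2 proximal step. */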
float* Im2_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_u_d,next_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_v_d,next_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Middle(u_d,v_d,warpIm2_d,Im1_d,Im2_d,pre_u_d,pre_v_d,next_u_d,next_v_d,width,height,nChannels, alpha,beta,gamma,lambda,ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(next_u_d) ); checkCudaErrors( hipFree(next_v_d) ); checkCudaErrors( hipFree(pre_u_d) ); checkCudaErrors( hipFree(pre_v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(warpIm2_d) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); Im1_d = 0; Im2_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; pre_u_d = 0; pre_v_d = 0; u_d = 0; v_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Middle_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; 
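/* occupy appears to be an occupancy mask consumed by the *_Occupy warping kernels; it is read-only on the device, so it is uploaded once and never copied back. */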
checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(occupy_d,occupy,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_u_d,next_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_v_d,next_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Middle_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,pre_u_d,pre_v_d,next_u_d,next_v_d,width,height,nChannels, alpha,beta,gamma,lambda,ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(next_u_d) ); checkCudaErrors( hipFree(next_v_d) ); checkCudaErrors( hipFree(pre_u_d) ); checkCudaErrors( hipFree(pre_v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(occupy_d) ); checkCudaErrors( hipFree(warpIm2_d) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); Im1_d = 0; Im2_d = 0; occupy_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; pre_u_d = 0; pre_v_d = 0; u_d = 0; v_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_DL1_Middle(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* pre_u_d = 0; 
float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_u_d,next_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(next_v_d,next_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_DL1_Middle(u_d,v_d,warpIm2_d,Im1_d,Im2_d,pre_u_d,pre_v_d,next_u_d,next_v_d,width,height,nChannels, alpha,beta,gamma,lambda,ADMMIter,nOuterFPIter,nInnerFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(next_u_d) ); checkCudaErrors( hipFree(next_v_d) ); checkCudaErrors( hipFree(pre_u_d) ); checkCudaErrors( hipFree(pre_v_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(warpIm2_d) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); Im1_d = 0; Im2_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; pre_u_d = 0; pre_v_d = 0; u_d = 0; v_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Last(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( 
hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Last(u_d,v_d,warpIm2_d,Im1_d,Im2_d,pre_u_d,pre_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(warpIm2_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(pre_u_d) ); checkCudaErrors( hipFree(pre_v_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; pre_u_d = 0; pre_v_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Last_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(occupy_d,occupy,sizeof(float)*width*height, 
hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Last_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,pre_u_d,pre_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(warpIm2_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(occupy_d) ); checkCudaErrors( hipFree(pre_u_d) ); checkCudaErrors( hipFree(pre_v_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; occupy_d = 0; warpIm2_d = 0; pre_u_d = 0; pre_v_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_DL1_Last(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( hipMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( hipMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( hipMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( hipMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(u_d,u,sizeof(float)*width*height, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(v_d,v,sizeof(float)*width*height, hipMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_DL1_Last(u_d,v_d,warpIm2_d,Im1_d,Im2_d,pre_u_d,pre_v_d,width,height,nChannels,alpha,beta,gamma,lambda, 
ADMMIter,nOuterFPIter,nSORIter,nInnerFPIter,nWarpFPIter,nPoissonIter); checkCudaErrors( hipMemcpy(u,u_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(v,v_d,sizeof(float)*width*height, hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(u_d) ); checkCudaErrors( hipFree(v_d) ); checkCudaErrors( hipFree(warpIm2_d) ); checkCudaErrors( hipFree(Im1_d) ); checkCudaErrors( hipFree(Im2_d) ); checkCudaErrors( hipFree(pre_u_d) ); checkCudaErrors( hipFree(pre_v_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; pre_u_d = 0; pre_v_d = 0; hipEventRecord(stop,0); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&time,start,stop); return time; } } #endif
34c917417f160708bdb16348f401b9f51efc4c81.cu
#ifndef _ZQ_CUDA_OPTICAL_FLOW_2D_UTILS_CU_ #define _ZQ_CUDA_OPTICAL_FLOW_2D_UTILS_CU_ #include "ZQ_CUDA_OpticalFlow2D_Utils.cuh" #include "ZQ_CUDA_ImageProcessing2D.cuh" #include "ZQ_CUDA_PoissonSolver2D.cuh" namespace ZQ_CUDA_OpticalFlow2D { /**************** Base Kernels **********************************/ __global__ void compute_psi_data_Kernel(float* psi_data, const float* imdx, const float* imdy, const float* imdt, const float* du, const float* dv, const float eps, const int width, const int height, const int nChannels) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; float value = 0; for(int i = 0;i < nChannels;i++) { float tmp = (imdt[offset*nChannels+i]+imdx[offset*nChannels+i]*du[offset]+imdy[offset*nChannels+i]*dv[offset]); value += tmp*tmp; } psi_data[offset] = 0.5/sqrt(value+eps); } __global__ void compute_psi_smooth_Kernel(float* psi_smooth, const float* u, const float* v, const float eps, const int width, const int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; float ux = (x < width-1) ? (u[offset+1]-u[offset]) : 0; float uy = (y < height-1) ? (u[offset+width]-u[offset]) : 0; float vx = (x < width-1) ? (v[offset+1]-v[offset]) : 0; float vy = (y < height-1) ? (v[offset+width]-v[offset]) : 0; psi_smooth[offset] = 0.5/sqrt(ux*ux+uy*uy+vx*vx+vy*vy+eps); } __global__ void compute_psi_u_v_Kernel(float* psi_u, float* psi_v, const float* u, const float* v, const float eps, const int width, const int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; psi_u[offset] = 0.5/sqrt(u[offset]*u[offset]+eps); psi_v[offset] = 0.5/sqrt(v[offset]*v[offset]+eps); } __global__ void compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel(float* imdxdx, float* imdxdy, float* imdydy, float* imdtdx, float* imdtdy, const float* imdx, const float* imdy, const float* imdt, const int width, const int height, const int nChannels) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; imdxdx[offset] = 0; imdxdy[offset] = 0; imdydy[offset] = 0; imdtdx[offset] = 0; imdtdy[offset] = 0; for(int c = 0; c < nChannels;c++) { imdxdx[offset] += imdx[offset*nChannels+c]*imdx[offset*nChannels+c]; imdxdy[offset] += imdx[offset*nChannels+c]*imdy[offset*nChannels+c]; imdydy[offset] += imdy[offset*nChannels+c]*imdy[offset*nChannels+c]; imdtdx[offset] += imdt[offset*nChannels+c]*imdx[offset*nChannels+c]; imdtdy[offset] += imdt[offset*nChannels+c]*imdy[offset*nChannels+c]; } imdxdx[offset] /= nChannels; imdxdy[offset] /= nChannels; imdydy[offset] /= nChannels; imdtdx[offset] /= nChannels; imdtdy[offset] /= nChannels; } __global__ void compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_withpsidata_Kernel(float* imdxdx, float* imdxdy, float* imdydy, float* imdtdx, float* imdtdy, const float* imdx, const float* imdy, const float* imdt, const float* psi_data, const int width, const int height, const int nChannels) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; imdxdx[offset] = 0; imdxdy[offset] = 0; imdydy[offset] = 0; imdtdx[offset] = 0; imdtdy[offset] = 0; for(int c = 0; c < nChannels;c++) { 
imdxdx[offset] += imdx[offset*nChannels+c]*imdx[offset*nChannels+c]; imdxdy[offset] += imdx[offset*nChannels+c]*imdy[offset*nChannels+c]; imdydy[offset] += imdy[offset*nChannels+c]*imdy[offset*nChannels+c]; imdtdx[offset] += imdt[offset*nChannels+c]*imdx[offset*nChannels+c]; imdtdy[offset] += imdt[offset*nChannels+c]*imdy[offset*nChannels+c]; } imdxdx[offset] *= psi_data[offset]/nChannels; imdxdy[offset] *= psi_data[offset]/nChannels; imdydy[offset] *= psi_data[offset]/nChannels; imdtdx[offset] *= psi_data[offset]/nChannels; imdtdy[offset] *= psi_data[offset]/nChannels; } __global__ void Laplacian_withpsismooth_Kernel(float* output, const float* input,const float* psi_smooth, const int width, const int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; float value = 0; float in_x,in_x_,in_y,in_y_; in_x = (x < width-1) ? (input[offset+1] - input[offset]) : 0 ; in_x_ = (x > 0) ? (input[offset] - input[offset-1]) : 0; value += (x > 0) ? (psi_smooth[offset]*in_x - psi_smooth[offset-1]*in_x_) : 0; in_y = (y < height-1) ? (input[offset+width] - input[offset]) : 0; in_y_ = (y > 0) ? (input[offset] - input[offset-width]) : 0; value += (y > 0) ? (psi_smooth[offset]*in_y - psi_smooth[offset-width]*in_y_) : 0; output[offset] = value; } __global__ void OpticalFlow_L2_RedBlack_Kernel(float* du, float* dv, const float* u, const float* v, const float* imdxdx, const float* imdxdy, const float* imdydy, const float* imdtdx, const float* imdtdy, const float* laplace_u, const float* laplace_v, const int width, const int height, const float alpha, const float beta, const float omega, const bool redKernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; if(rest == (redKernel ? 
1 : 0)) return; int i = y; int j = x; int offset = i * width + j; float sigma1 = 0, sigma2 = 0, coeff = 0; float _weight; if(j > 0) { _weight = 1; sigma1 += _weight*du[offset-1]; sigma2 += _weight*dv[offset-1]; coeff += _weight; } if(j < width-1) { _weight = 1; sigma1 += _weight*du[offset+1]; sigma2 += _weight*dv[offset+1]; coeff += _weight; } if(i > 0) { _weight = 1; sigma1 += _weight*du[offset-width]; sigma2 += _weight*dv[offset-width]; coeff += _weight; } if(i < height-1) { _weight = 1; sigma1 += _weight*du[offset+width]; sigma2 += _weight*dv[offset+width]; coeff += _weight; } sigma1 *= alpha; sigma2 *= alpha; coeff *= alpha; // compute u sigma1 += alpha*laplace_u[offset] - imdtdx[offset] - imdxdy[offset]*dv[offset] - beta*u[offset]; float coeff1 = coeff + imdxdx[offset] + beta; du[offset] = (1-omega)*du[offset] + omega/coeff1*sigma1; // compute v sigma2 += alpha*laplace_v[offset] - imdtdy[offset] - imdxdy[offset]*du[offset] - beta*v[offset]; float coeff2 = coeff + imdydy[offset] + beta; dv[offset] = (1-omega)*dv[offset] + omega/coeff2*sigma2; } __global__ void OpticalFlow_L1_RedBlack_Kernel(float* du, float* dv, const float* u, const float* v, const float* imdxdx, const float* imdxdy, const float* imdydy, const float* imdtdx, const float* imdtdy, const float* laplace_u, const float* laplace_v,const float* psi_smooth, const float* psi_u, const float* psi_v, const int width, const int height, const float alpha, const float beta, const float omega, const bool redKernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; if(rest == (redKernel ? 1 : 0)) return; int i = y; int j = x; int offset = i * width + j; float sigma1 = 0, sigma2 = 0, coeff = 0; float _weight; if(j > 0) { _weight = psi_smooth[offset-1]; sigma1 += _weight*du[offset-1]; sigma2 += _weight*dv[offset-1]; coeff += _weight; } if(j < width-1) { _weight = psi_smooth[offset]; sigma1 += _weight*du[offset+1]; sigma2 += _weight*dv[offset+1]; coeff += _weight; } if(i > 0) { _weight = psi_smooth[offset-width]; sigma1 += _weight*du[offset-width]; sigma2 += _weight*dv[offset-width]; coeff += _weight; } if(i < height-1) { _weight = psi_smooth[offset]; sigma1 += _weight*du[offset+width]; sigma2 += _weight*dv[offset+width]; coeff += _weight; } sigma1 *= alpha; sigma2 *= alpha; coeff *= alpha; // compute u sigma1 += alpha*laplace_u[offset] - imdtdx[offset] - imdxdy[offset]*dv[offset] - beta*psi_u[offset]*u[offset]; float coeff1 = coeff + imdxdx[offset] + beta*psi_u[offset]; du[offset] = (1-omega)*du[offset] + omega/coeff1*sigma1; // compute v sigma2 += alpha*laplace_v[offset] - imdtdy[offset] - imdxdy[offset]*du[offset] - beta*psi_v[offset]*v[offset]; float coeff2 = coeff + imdydy[offset] + beta*psi_v[offset]; dv[offset] = (1-omega)*dv[offset] + omega/coeff2*sigma2; } /******************************* for ADMM ********************************************/ __global__ void proximalF_RedBlack_Kernel(float* du, float* dv, const float* imdxdx, const float* imdxdy, const float* imdydy, const float* imdtdx, const float* imdtdy, const float* laplace_u, const float* laplace_v, const float* u, const float* z_u, const float* v, const float* z_v, const int width, const int height, const float alpha, const float beta, const float lambda, const float omega, const bool redKernel) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int rest = (x+y)%2; if(rest == (redKernel 
? 1 : 0)) return; int i = y; int j = x; int offset = i * width + j; float sigma1 = 0, sigma2 = 0, coeff = 0; float _weight; if(j > 0) { _weight = 1; sigma1 += _weight*du[offset-1]; sigma2 += _weight*dv[offset-1]; coeff += _weight; } if(j < width-1) { _weight = 1; sigma1 += _weight*du[offset+1]; sigma2 += _weight*dv[offset+1]; coeff += _weight; } if(i > 0) { _weight = 1; sigma1 += _weight*du[offset-width]; sigma2 += _weight*dv[offset-width]; coeff += _weight; } if(i < height-1) { _weight = 1; sigma1 += _weight*du[offset+width]; sigma2 += _weight*dv[offset+width]; coeff += _weight; } sigma1 *= alpha; sigma2 *= alpha; coeff *= alpha; // compute u sigma1 += alpha*laplace_u[offset] - imdtdx[offset] - imdxdy[offset]*dv[offset] - beta*u[offset] - 0.5*lambda*(u[offset] - z_u[offset]); float coeff1 = coeff + imdxdx[offset] + beta + 0.5*lambda; du[offset] = (1-omega)*du[offset] + omega/coeff1*sigma1; // compute v sigma2 += alpha*laplace_v[offset] - imdtdy[offset] - imdxdy[offset]*du[offset] - beta*v[offset] - 0.5*lambda*(v[offset] - z_v[offset]); float coeff2 = coeff + imdydy[offset] + beta + 0.5*lambda; dv[offset] = (1-omega)*dv[offset] + omega/coeff2*sigma2; } __global__ void proximal_F2_Kernel(float* u, float* v, const float* z_u, const float* z_v, const float* warpU, const float* warpV, const int width, const int height, const float gama, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; u[offset] = (gama*warpU[offset]+0.5*lambda*z_u[offset])/(gama+0.5*lambda); v[offset] = (gama*warpV[offset]+0.5*lambda*z_v[offset])/(gama+0.5*lambda); } __global__ void compute_z_u_z_v_for_proximal_F1_Kernel(float* z_u, float* z_v, const float* u_for_F1, const float* v_for_F1, const float* u_for_q1, const float* v_for_q1, const int width, const int height, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; z_u[offset] = u_for_F1[offset] - 1.0/lambda*u_for_q1[offset]; z_v[offset] = v_for_F1[offset] - 1.0/lambda*v_for_q1[offset]; } __global__ void compute_z_u_z_v_for_proximal_F2_Kernel(float* z_u, float* z_v, const float* u_for_F2, const float* v_for_F2, const float* u_for_q2, const float* v_for_q2, const int width, const int height, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; z_u[offset] = u_for_F2[offset] - 1.0/lambda*u_for_q2[offset]; z_v[offset] = v_for_F2[offset] - 1.0/lambda*v_for_q2[offset]; } __global__ void compute_z_u_z_v_for_proximal_G_Kernel(float* z_u, float* z_v, const float* u_for_F1, const float* v_for_F1, const float* u_for_F2, const float* v_for_F2, const float* u_for_q1, const float* v_for_q1, const float* u_for_q2, const float* v_for_q2, const int width, const int height, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; z_u[offset] = 0.5*(u_for_F1[offset] + 1.0/lambda*u_for_q1[offset] + u_for_F2[offset] + 1.0/lambda*u_for_q2[offset]); z_v[offset] = 0.5*(v_for_F1[offset] + 1.0/lambda*v_for_q1[offset] + v_for_F2[offset] + 1.0/lambda*v_for_q2[offset]); } __global__ void update_u_v_for_q1_q2_Kernel(float* u_for_q1, float* v_for_q1, float* u_for_q2, float* v_for_q2, const float* u_for_F1, 
const float* v_for_F1, const float* u_for_F2, const float* v_for_F2, const float* u_for_G, const float* v_for_G, const int width, const int height, const float lambda) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if(x >= width || y >= height) return ; int offset = y*width+x; u_for_q1[offset] += lambda*(u_for_F1[offset] - u_for_G[offset]); v_for_q1[offset] += lambda*(v_for_F1[offset] - v_for_G[offset]); u_for_q2[offset] += lambda*(u_for_F2[offset] - u_for_G[offset]); v_for_q2[offset] += lambda*(v_for_F2[offset] - v_for_G[offset]); } /****************************************************************************************************/ void cu_Compute_z_u_z_v_for_proximal_F1(float* z_u, float* z_v, const float* u_for_F1, const float* v_for_F1, const float* u_for_q1, const float* v_for_q1, const int width, const int height, const float lambda) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); compute_z_u_z_v_for_proximal_F1_Kernel<<<gridSize,blockSize>>>(z_u,z_v,u_for_F1,v_for_F1,u_for_q1,v_for_q1,width,height,lambda); } void cu_Compute_z_u_z_v_for_proximal_F2(float* z_u, float* z_v, const float* u_for_F2, const float* v_for_F2, const float* u_for_q2, const float* v_for_q2, const int width, const int height, const float lambda) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); compute_z_u_z_v_for_proximal_F2_Kernel<<<gridSize,blockSize>>>(z_u,z_v,u_for_F2,v_for_F2,u_for_q2,v_for_q2,width,height,lambda); } void cu_Compute_z_u_z_v_for_proximal_G(float* z_u, float* z_v, const float* u_for_F1, const float* v_for_F1, const float* u_for_F2, const float* v_for_F2, const float* u_for_q1, const float* v_for_q1, const float* u_for_q2, const float* v_for_q2, const int width, const int height, const float lambda) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); compute_z_u_z_v_for_proximal_G_Kernel<<<gridSize,blockSize>>>(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2, u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,lambda); } void cu_Update_u_v_for_q1_q2(float* u_for_q1, float* v_for_q1, float* u_for_q2, float* v_for_q2, const float* u_for_F1, const float* v_for_F1, const float* u_for_F2, const float* v_for_F2, const float* u_for_G, const float* v_for_G, const int width, const int height, const float lambda) { dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); update_u_v_for_q1_q2_Kernel<<<gridSize,blockSize>>>(u_for_q1,v_for_q1,u_for_q2,v_for_q2, u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,lambda); } void cu_GetDerivatives(float* imdx, float* imdy, float* imdt, const float* Im1, const float* Im2, const int width, const int height, const int nChannels) { float* tmpBuf = 0; float* im1 = 0; float* im2 = 0; checkCudaErrors( cudaMalloc((void**)&tmpBuf, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&im1, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&im2, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(tmpBuf,0,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(im1,0,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(im2,0,sizeof(float)*width*height*nChannels) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 
gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);

    ZQ_CUDA_ImageProcessing2D::Imfilter_h_Gaussian_Kernel<<<gridSize,blockSize>>>(tmpBuf,Im1,width,height,nChannels);
    ZQ_CUDA_ImageProcessing2D::Imfilter_v_Gaussian_Kernel<<<gridSize,blockSize>>>(im1,tmpBuf,width,height,nChannels);
    ZQ_CUDA_ImageProcessing2D::Imfilter_h_Gaussian_Kernel<<<gridSize,blockSize>>>(tmpBuf,Im2,width,height,nChannels);
    ZQ_CUDA_ImageProcessing2D::Imfilter_v_Gaussian_Kernel<<<gridSize,blockSize>>>(im2,tmpBuf,width,height,nChannels);

    /* tmpBuf = im1*0.4 + im2*0.6 */
    ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(tmpBuf,im1,0.4,im2,0.6,width,height,nChannels);

    /* imdx = \partial_x {tmpBuf} */
    ZQ_CUDA_ImageProcessing2D::Derivative_x_Advanced_Kernel<<<gridSize,blockSize>>>(imdx,tmpBuf,width,height,nChannels);

    /* imdy = \partial_y {tmpBuf} */
    ZQ_CUDA_ImageProcessing2D::Derivative_y_Advanced_Kernel<<<gridSize,blockSize>>>(imdy,tmpBuf,width,height,nChannels);

    /* imdt = im2 - im1 */
    ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(imdt,im2,1,im1,-1,width,height,nChannels);

    checkCudaErrors( cudaFree(tmpBuf) );
    checkCudaErrors( cudaFree(im1) );
    checkCudaErrors( cudaFree(im2) );
    tmpBuf = 0;
    im1 = 0;
    im2 = 0;
}

/* alpha: penalty for velocity gradient, beta: penalty for velocity magnitude */
void cu_OpticalFlow_L2(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels,
                       const float alpha, const float beta, const int nOuterFPIter, const int nSORIter)
{
    float* du = 0;
    float* dv = 0;
    float* laplace_u = 0;
    float* laplace_v = 0;
    float* imdx = 0;
    float* imdy = 0;
    float* imdt = 0;
    float* imdxdx = 0;
    float* imdxdy = 0;
    float* imdydy = 0;
    float* imdtdx = 0;
    float* imdtdy = 0;

    checkCudaErrors( cudaMalloc((void**)&du, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&dv, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&laplace_u, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&laplace_v, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMalloc((void**)&imdxdx, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdxdy, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdydy, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdtdx, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdtdy, sizeof(float)*width*height) );

    checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(laplace_u, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(laplace_v, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdx, 0, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMemset(imdy, 0, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMemset(imdt, 0, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMemset(imdxdx, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdxdy, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdydy, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdtdx, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdtdy, 0, sizeof(float)*width*height) );

    int nOuterFPIterations = nOuterFPIter;
    int nSORIterations = nSORIter;

    dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE);
    dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y);

    /************ Outer Loop Begin *************/
    //refresh {u,v} in each loop
    for(int count = 0; count < nOuterFPIterations;count++)
    {
        /* warp image bicubic */
        ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels);

        /* get imdx, imdy, imdt*/
        cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels);

        /* reset du, dv */
        checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) );
        checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) );

        /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */
        compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel<<<gridSize,blockSize>>>(imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,width,height,nChannels);

        /* laplace u, v */
        ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_u,u,width,height,1);
        ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_v,v,width,height,1);

        // set omega
        float omega = 1.0f;
        float alpha2 = alpha*alpha;
        float beta2 = beta*beta;

        /* red - black solver begin */
        for(int sor_it = 0;sor_it < nSORIterations;sor_it++)
        {
            OpticalFlow_L2_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,
                width,height,alpha2,beta2,omega,true);
            OpticalFlow_L2_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,
                width,height,alpha2,beta2,omega,false);
        }
        /* red - black solver end */

        ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u,du,1,width,height,1);
        ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v,dv,1,width,height,1);
    }
    /************ Outer Loop End *************/

    /* warp image bicubic */
    ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels);

    checkCudaErrors( cudaFree(du) );
    checkCudaErrors( cudaFree(dv) );
    checkCudaErrors( cudaFree(laplace_u) );
    checkCudaErrors( cudaFree(laplace_v) );
    checkCudaErrors( cudaFree(imdx) );
    checkCudaErrors( cudaFree(imdy) );
    checkCudaErrors( cudaFree(imdt) );
    checkCudaErrors( cudaFree(imdxdx) );
    checkCudaErrors( cudaFree(imdxdy) );
    checkCudaErrors( cudaFree(imdydy) );
    checkCudaErrors( cudaFree(imdtdx) );
    checkCudaErrors( cudaFree(imdtdy) );

    du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0;
    imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0;
}

/* alpha: penalty for velocity gradient, beta: penalty for velocity magnitude */
void cu_OpticalFlow_L2_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const int width, const int height, const int nChannels,
                              const float alpha, const float beta, const int nOuterFPIter, const int nSORIter)
{
    float* du = 0;
    float* dv = 0;
    float* laplace_u = 0;
    float* laplace_v = 0;
    float* imdx = 0;
    float* imdy = 0;
    float* imdt = 0;
    float* imdxdx = 0;
    float* imdxdy = 0;
    float* imdydy = 0;
    float* imdtdx = 0;
    float* imdtdy = 0;

    checkCudaErrors( cudaMalloc((void**)&du, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&dv, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&laplace_u, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&laplace_v, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) );
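    /* note: imdx/imdy/imdt are per-channel buffers (width*height*nChannels floats), while the
       aggregated products imdxdx..imdtdy allocated below are single-channel (width*height);
       compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel presumably reduces these products over channels. */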
checkCudaErrors( cudaMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdy, 0, sizeof(float)*width*height) ); int nOuterFPIterations = nOuterFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Occupy_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,occupy,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel<<<gridSize,blockSize>>>(imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,width,height,nChannels); /* laplace u, v */ ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_u,u,width,height,1); ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_v,v,width,height,1); // set omega float omega = 1.0f; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { OpticalFlow_L2_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,true); OpticalFlow_L2_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,false); } /* red - black solver end */ ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u,du,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Occupy_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,occupy,u,v,width,height,nChannels); checkCudaErrors( cudaFree(du) ); checkCudaErrors( cudaFree(dv) ); checkCudaErrors( 
cudaFree(laplace_u) );
    checkCudaErrors( cudaFree(laplace_v) );
    checkCudaErrors( cudaFree(imdx) );
    checkCudaErrors( cudaFree(imdy) );
    checkCudaErrors( cudaFree(imdt) );
    checkCudaErrors( cudaFree(imdxdx) );
    checkCudaErrors( cudaFree(imdxdy) );
    checkCudaErrors( cudaFree(imdydy) );
    checkCudaErrors( cudaFree(imdtdx) );
    checkCudaErrors( cudaFree(imdtdy) );

    du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0;
    imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0;
}

/* alpha: penalty for velocity gradient, beta: penalty for velocity magnitude */
void cu_OpticalFlow_L1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels,
                       const float alpha, const float beta, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter)
{
    float eps = optical_flow_L1_eps;

    float* du = 0;
    float* dv = 0;
    float* laplace_u = 0;
    float* laplace_v = 0;
    float* imdx = 0;
    float* imdy = 0;
    float* imdt = 0;
    float* imdxdx = 0;
    float* imdxdy = 0;
    float* imdydy = 0;
    float* imdtdx = 0;
    float* imdtdy = 0;
    float* psi_data = 0;
    float* psi_smooth = 0;
    float* psi_u = 0;
    float* psi_v = 0;

    checkCudaErrors( cudaMalloc((void**)&du, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&dv, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&laplace_u, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&laplace_v, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMalloc((void**)&imdxdx, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdxdy, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdydy, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdtdx, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&imdtdy, sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&psi_data,sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&psi_smooth,sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&psi_u,sizeof(float)*width*height) );
    checkCudaErrors( cudaMalloc((void**)&psi_v,sizeof(float)*width*height) );

    checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(laplace_u, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(laplace_v, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdx, 0, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMemset(imdy, 0, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMemset(imdt, 0, sizeof(float)*width*height*nChannels) );
    checkCudaErrors( cudaMemset(imdxdx, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdxdy, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdydy, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdtdx, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(imdtdy, 0, sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(psi_data,0,sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(psi_smooth,0,sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(psi_u,0,sizeof(float)*width*height) );
    checkCudaErrors( cudaMemset(psi_v,0,sizeof(float)*width*height) );

    int nOuterFPIterations = nOuterFPIter;
    int
nInnerFPIterations = nInnerFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); for(int inner_it = 0; inner_it < nInnerFPIterations;inner_it++) { /* compute psi_data*/ compute_psi_data_Kernel<<<gridSize,blockSize>>>(psi_data,imdx,imdy,imdt,du,dv,eps,width,height,nChannels); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_withpsidata_Kernel<<<gridSize,blockSize>>>(imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,psi_data,width,height,nChannels); /*compute psi_smooth*/ ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u,du,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v,dv,1,width,height,1); compute_psi_smooth_Kernel<<<gridSize,blockSize>>>(psi_smooth,u,v,eps,width,height); compute_psi_u_v_Kernel<<<gridSize,blockSize>>>(psi_u,psi_v,u,v,eps,width,height); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u,du,-1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v,dv,-1,width,height,1); /* laplace u, v with psi_smooth */ Laplacian_withpsismooth_Kernel<<<gridSize,blockSize>>>(laplace_u,u,psi_smooth,width,height); Laplacian_withpsismooth_Kernel<<<gridSize,blockSize>>>(laplace_v,v,psi_smooth,width,height); // set omega float omega = 1.0f; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { OpticalFlow_L1_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,psi_smooth, psi_u,psi_v,width,height,alpha,beta,omega,true); OpticalFlow_L1_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,psi_smooth, psi_u,psi_v,width,height,alpha,beta,omega,false); } /* red - black solver end */ } ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u,du,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels); checkCudaErrors( cudaFree(du) ); checkCudaErrors( cudaFree(dv) ); checkCudaErrors( cudaFree(laplace_u) ); checkCudaErrors( cudaFree(laplace_v) ); checkCudaErrors( cudaFree(imdx) ); checkCudaErrors( cudaFree(imdy) ); checkCudaErrors( cudaFree(imdt) ); checkCudaErrors( cudaFree(imdxdx) ); checkCudaErrors( cudaFree(imdxdy) ); checkCudaErrors( cudaFree(imdydy) ); checkCudaErrors( cudaFree(imdtdx) ); checkCudaErrors( cudaFree(imdtdy) ); checkCudaErrors( cudaFree(psi_data) ); checkCudaErrors( cudaFree(psi_smooth) ); checkCudaErrors( cudaFree(psi_u) ); checkCudaErrors( cudaFree(psi_v) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; psi_data = 0; psi_smooth = 0; psi_u = 
0; psi_v = 0; } void cu_OpticalFlow_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter) { float eps = optical_flow_L1_eps; float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; float* psi_data = 0; checkCudaErrors( cudaMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&psi_data,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(psi_data,0,sizeof(float)*width*height) ); int nOuterFPIterations = nOuterFPIter; int nInnerFPIterations = nInnerFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); for(int inner_it = 0; inner_it < nInnerFPIterations;inner_it++) { /* compute psi_data*/ compute_psi_data_Kernel<<<gridSize,blockSize>>>(psi_data,imdx,imdy,imdt,du,dv,eps,width,height,nChannels); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ 
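            /* the psi_data weight (computed above) down-weights pixels whose linearized residual
               -- presumably imdt + imdx*du + imdy*dv per channel -- is large, which turns the quadratic
               data term into an approximately L1 (Charbonnier-like, eps-smoothed) penalty; the exact
               weight formula lives in compute_psi_data_Kernel defined earlier in this file. */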
compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_withpsidata_Kernel<<<gridSize,blockSize>>>(imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,psi_data, width,height,nChannels); /* laplace u, v with psi_smooth */ ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_u,u,width,height,1); ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_v,v,width,height,1); // set omega float omega = 1.0; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { OpticalFlow_L2_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,true); OpticalFlow_L2_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,u,v,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v, width,height,alpha2,beta2,omega,false); } /* red - black solver end */ } ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u,du,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels); checkCudaErrors( cudaFree(du) ); checkCudaErrors( cudaFree(dv) ); checkCudaErrors( cudaFree(laplace_u) ); checkCudaErrors( cudaFree(laplace_v) ); checkCudaErrors( cudaFree(imdx) ); checkCudaErrors( cudaFree(imdy) ); checkCudaErrors( cudaFree(imdt) ); checkCudaErrors( cudaFree(imdxdx) ); checkCudaErrors( cudaFree(imdxdy) ); checkCudaErrors( cudaFree(imdydy) ); checkCudaErrors( cudaFree(imdtdx) ); checkCudaErrors( cudaFree(imdtdy) ); checkCudaErrors( cudaFree(psi_data) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; psi_data = 0; } void cu_Proximal_F1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* z_u, const float* z_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int nOuterFPIter, const int nSORIter) { float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; checkCudaErrors( cudaMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_u, 0, 
sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdy, 0, sizeof(float)*width*height) ); /* ProximalF(z_u,z_v,\lambda) = minimize_{u,v} \int {|I_2(x+u,y+v)-I_1(x,y)|^2} + \alpha^2 \int {|\nabla u|^2 + |\nabla v|^2} + \beta^2 \int {|u|^2 + |v|^2} + \lambda \int {|u-z_u|^2 + |v-z_v|^2} * * The Euler-Lagrange equation is: * I_t I_x + \beta^2 u + \lambda(u-z_u) = \alpha^2 \Delta u * I_t I_y + \beta^2 v + \lambda(v-z_v) = \alpha^2 \Delta v */ int nOuterFPIterations = nOuterFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel<<<gridSize,blockSize>>>(imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,width,height,nChannels); /* laplace u, v */ ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_u,u,width,height,1); ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_v,v,width,height,1); // set omega float omega = 1.0; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { proximalF_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,true); proximalF_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,false); } /* red - black solver end */ ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u,du,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels); checkCudaErrors( cudaFree(du) ); checkCudaErrors( cudaFree(dv) ); checkCudaErrors( cudaFree(laplace_u) ); checkCudaErrors( cudaFree(laplace_v) ); checkCudaErrors( cudaFree(imdx) ); checkCudaErrors( cudaFree(imdy) ); checkCudaErrors( cudaFree(imdt) ); checkCudaErrors( cudaFree(imdxdx) ); checkCudaErrors( cudaFree(imdxdy) ); checkCudaErrors( cudaFree(imdydy) ); checkCudaErrors( cudaFree(imdtdx) ); checkCudaErrors( cudaFree(imdtdy) ); du = 0; dv = 0; laplace_u = 
0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; } void cu_Proximal_F1_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* z_u, const float* z_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int nOuterFPIter, const int nSORIter) { float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; checkCudaErrors( cudaMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdy, 0, sizeof(float)*width*height) ); /* ProximalF(z_u,z_v,\lambda) = minimize_{u,v} \int {|I_2(x+u,y+v)-I_1(x,y)|^2} + \alpha^2 \int {|\nabla u|^2 + |\nabla v|^2} + \beta^2 \int {|u|^2 + |v|^2} + \lambda \int {|u-z_u|^2 + |v-z_v|^2} * * The Euler-Lagrange equation is: * I_t I_x + \beta^2 u + \lambda(u-z_u) = \alpha^2 \Delta u * I_t I_y + \beta^2 v + \lambda(v-z_v) = \alpha^2 \Delta v */ int nOuterFPIterations = nOuterFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Occupy_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,occupy,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); /* compute imdxdx, imdxdy, 
imdydy, imdtdx, imdtdy */ compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_Kernel<<<gridSize,blockSize>>>(imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,width,height,nChannels); /* laplace u, v */ ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_u,u,width,height,1); ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_v,v,width,height,1); // set omega float omega = 1.0; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { proximalF_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,true); proximalF_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,false); } /* red - black solver end */ ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u,du,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Occupy_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,occupy,u,v,width,height,nChannels); checkCudaErrors( cudaFree(du) ); checkCudaErrors( cudaFree(dv) ); checkCudaErrors( cudaFree(laplace_u) ); checkCudaErrors( cudaFree(laplace_v) ); checkCudaErrors( cudaFree(imdx) ); checkCudaErrors( cudaFree(imdy) ); checkCudaErrors( cudaFree(imdt) ); checkCudaErrors( cudaFree(imdxdx) ); checkCudaErrors( cudaFree(imdxdy) ); checkCudaErrors( cudaFree(imdydy) ); checkCudaErrors( cudaFree(imdtdx) ); checkCudaErrors( cudaFree(imdtdy) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; } void cu_Proximal_F1_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* z_u, const float* z_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter) { float eps = optical_flow_L1_eps; float* du = 0; float* dv = 0; float* laplace_u = 0; float* laplace_v = 0; float* imdx = 0; float* imdy = 0; float* imdt = 0; float* imdxdx = 0; float* imdxdy = 0; float* imdydy = 0; float* imdtdx = 0; float* imdtdy = 0; float* psi_data = 0; checkCudaErrors( cudaMalloc((void**)&du, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&dv, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&laplace_u, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&laplace_v, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdx, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdy, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdt, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&imdxdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdxdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdydy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdx, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&imdtdy, sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&psi_data, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) 
); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_u, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(laplace_v, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdx, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdy, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdt, 0, sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemset(imdxdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdxdy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdydy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdx, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(imdtdy, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(psi_data,0, sizeof(float)*width*height) ); /* ProximalF(z_u,z_v,\lambda) = minimize_{u,v} \int {|I_2(x+u,y+v)-I_1(x,y)|^2} + \alpha^2 \int {|\nabla u|^2 + |\nabla v|^2} + \beta^2 \int {|u|^2 + |v|^2} + \lambda \int {|u-z_u|^2 + |v-z_v|^2} * * The Euler-Lagrange equation is: * I_t I_x + \beta^2 u + \lambda(u-z_u) = \alpha^2 \Delta u * I_t I_y + \beta^2 v + \lambda(v-z_v) = \alpha^2 \Delta v */ int nOuterFPIterations = nOuterFPIter; int nInnerFPIterations = nInnerFPIter; int nSORIterations = nSORIter; dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); /************ Outer Loop Begin *************/ //refresh {u,v} in each loop for(int count = 0; count < nOuterFPIterations;count++) { /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels); /* get imdx, imdy, imdt*/ cu_GetDerivatives(imdx,imdy,imdt,Im1,warpIm2,width,height,nChannels); /* reset du, dv */ checkCudaErrors( cudaMemset(du, 0, sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(dv, 0, sizeof(float)*width*height) ); for(int inner_it = 0; inner_it < nInnerFPIterations; inner_it++) { /*compute psi_data*/ compute_psi_data_Kernel<<<gridSize,blockSize>>>(psi_data,imdx,imdy,imdt,du,dv,eps,width,height,nChannels); /* compute imdxdx, imdxdy, imdydy, imdtdx, imdtdy */ compute_imdxdx_imdxdy_imdydy_imdtdx_imtdy_withpsidata_Kernel<<<gridSize,blockSize>>>(imdxdx,imdxdy,imdydy,imdtdx,imdtdy,imdx,imdy,imdt,psi_data, width,height,nChannels); /* laplace u, v */ ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_u,u,width,height,1); ZQ_CUDA_ImageProcessing2D::Laplacian_Kernel<<<gridSize,blockSize>>>(laplace_v,v,width,height,1); // set omega float omega = 1.0; float alpha2 = alpha*alpha; float beta2 = beta*beta; /* red - black solver begin */ for(int sor_it = 0;sor_it < nSORIterations;sor_it++) { proximalF_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,true); proximalF_RedBlack_Kernel<<<gridSize,blockSize>>>(du,dv,imdxdx,imdxdy,imdydy,imdtdx,imdtdy,laplace_u,laplace_v,u,z_u,v,z_v, width,height,alpha2,beta2,lambda,omega,false); } /* red - black solver end */ } ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u,du,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v,dv,1,width,height,1); } /************ Outer Loop End *************/ /* warp image bicubic */ ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpIm2,Im1,Im2,u,v,width,height,nChannels); checkCudaErrors( 
cudaFree(du) ); checkCudaErrors( cudaFree(dv) ); checkCudaErrors( cudaFree(laplace_u) ); checkCudaErrors( cudaFree(laplace_v) ); checkCudaErrors( cudaFree(imdx) ); checkCudaErrors( cudaFree(imdy) ); checkCudaErrors( cudaFree(imdt) ); checkCudaErrors( cudaFree(imdxdx) ); checkCudaErrors( cudaFree(imdxdy) ); checkCudaErrors( cudaFree(imdydy) ); checkCudaErrors( cudaFree(imdtdx) ); checkCudaErrors( cudaFree(imdtdy) ); checkCudaErrors( cudaFree(psi_data) ); du = 0; dv = 0; laplace_u = 0; laplace_v = 0; imdx = 0; imdy = 0; imdt = 0; imdxdx = 0; imdxdy = 0; imdydy = 0; imdtdx = 0; imdtdy = 0; psi_data = 0; } void cu_Proximal_F2_first(float* u, float* v, const float* z_u, const float* z_v, const float* next_u, const float* next_v, const int width, const int height, const float gama, const float lambda, const int nFPIter, const int nPoissonIter) { int nOuterFPIterations = nFPIter; int nPoissonIterations = nPoissonIter; float* warpU = 0; float* warpV = 0; checkCudaErrors( cudaMalloc((void**)&warpU,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpV,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(warpU,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(warpV,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int out_it = 0;out_it < nOuterFPIterations;out_it++) { ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpU,u,next_u,u,v,width,height,1); ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpV,v,next_v,u,v,width,height,1); ZQ_CUDA_PoissonSolver2D::cu_SolveOpenPoissonRedBlack_Regular(warpU,warpV,width,height,nPoissonIterations); proximal_F2_Kernel<<<gridSize,blockSize>>>(u,v,z_u,z_v,warpU,warpV,width,height,gama,lambda); } checkCudaErrors( cudaFree(warpU) ); checkCudaErrors( cudaFree(warpV) ); warpU = 0; warpV = 0; } void cu_Proximal_F2_middle(float* u, float* v, const float* z_u, const float* z_v, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const float gama, const float lambda, const int nFPIter, const int nPoissonIter) { int nOuterFPIterations = nFPIter; int nPoissonIterations = nPoissonIter; float* warpU_pre = 0; float* warpV_pre = 0; float* warpU_nex = 0; float* warpV_nex = 0; float* tmp_u = 0; float* tmp_v = 0; checkCudaErrors( cudaMalloc((void**)&warpU_pre,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpV_pre,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpU_nex,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpV_nex,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&tmp_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&tmp_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(warpU_pre,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(warpV_pre,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(warpU_nex,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(warpV_nex,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int out_it = 0;out_it < nOuterFPIterations;out_it++) { ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpU_nex,u,next_u,u,v,width,height,1); 
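        /* next_v is warped along (u,v) in the same way just below; pre_u/pre_v are then warped along
           (-u,-v), the two estimates are averaged with weights 0.5/0.5, smoothed by the open Poisson
           red-black solve (presumably a projection toward a divergence-free field), and passed to
           proximal_F2_Kernel with weight 2*gama -- presumably because both temporal neighbours
           contribute here, versus gama in the first/last-frame variants. */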
ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpV_nex,v,next_v,u,v,width,height,1); checkCudaErrors( cudaMemset(tmp_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(tmp_v,0,sizeof(float)*width*height) ); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(tmp_u,u,-1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(tmp_v,v,-1,width,height,1); ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpU_pre,u,pre_u,tmp_u,tmp_v,width,height,1); ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpV_pre,v,pre_v,tmp_u,tmp_v,width,height,1); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(tmp_u,warpU_pre,0.5,warpU_nex,0.5,width,height,1); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(tmp_v,warpV_pre,0.5,warpV_nex,0.5,width,height,1); ZQ_CUDA_PoissonSolver2D::cu_SolveOpenPoissonRedBlack_Regular(tmp_u,tmp_v,width,height,nPoissonIterations); proximal_F2_Kernel<<<gridSize,blockSize>>>(u,v,z_u,z_v,tmp_u,tmp_v,width,height,2*gama,lambda); } checkCudaErrors( cudaFree(warpU_pre) ); checkCudaErrors( cudaFree(warpV_pre) ); checkCudaErrors( cudaFree(warpU_nex) ); checkCudaErrors( cudaFree(warpV_nex) ); checkCudaErrors( cudaFree(tmp_u) ); checkCudaErrors( cudaFree(tmp_v) ); warpU_pre = 0; warpV_pre = 0; warpU_nex = 0; warpV_nex = 0; tmp_u = 0; tmp_v = 0; } void cu_Proximal_F2_last(float* u, float* v, const float* z_u, const float* z_v, const float* pre_u, const float* pre_v, const int width, const int height, const float gama, const float lambda, const int nFPIter, const int nPoissonIter) { int nOuterFPIterations = nFPIter; int nPoissonIterations = nPoissonIter; float* warpU = 0; float* warpV = 0; float* tmp_u = 0; float* tmp_v = 0; checkCudaErrors( cudaMalloc((void**)&warpU,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpV,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&tmp_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&tmp_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(warpU,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(warpV,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int out_it = 0;out_it < nOuterFPIterations;out_it++) { checkCudaErrors( cudaMemset(tmp_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(tmp_v,0,sizeof(float)*width*height) ); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(tmp_u,u,-1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(tmp_v,v,-1,width,height,1); ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpU,u,pre_u,tmp_u,tmp_v,width,height,1); ZQ_CUDA_ImageProcessing2D::WarpImage_Bicubic_Kernel<<<gridSize,blockSize>>>(warpV,v,pre_v,tmp_u,tmp_v,width,height,1); ZQ_CUDA_PoissonSolver2D::cu_SolveOpenPoissonRedBlack_Regular(warpU,warpV,width,height,nPoissonIterations); proximal_F2_Kernel<<<gridSize,blockSize>>>(u,v,z_u,z_v,warpU,warpV,width,height,gama,lambda); } checkCudaErrors( cudaFree(warpU) ); checkCudaErrors( cudaFree(warpV) ); checkCudaErrors( cudaFree(tmp_u) ); checkCudaErrors( cudaFree(tmp_v) ); warpU = 0; warpV = 0; tmp_u = 0; tmp_v = 0; } void cu_Proximal_G(float* u, float* v, const float* z_u, const float* z_v, const int width, const int height, const int 
nPoissonIter) { checkCudaErrors( cudaMemcpy(u,z_u,sizeof(float)*width*height, cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v,z_v,sizeof(float)*width*height, cudaMemcpyDeviceToDevice) ); int nPoissonIterations = nPoissonIter; ZQ_CUDA_PoissonSolver2D::cu_SolveOpenPoissonRedBlack_Regular(u,v,width,height,nPoissonIterations); } void cu_OpticalFlow_ADMM(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nPoissonIter) { float* u_for_F = u; float* v_for_F = v; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q = 0; float* v_for_q = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int it = 0;it < ADMMIter;it++) { ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_u,u_for_G,1,u_for_q,-1.0,width,height,1); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_v,v_for_G,1,v_for_q,-1.0,width,height,1); cu_Proximal_F1(u_for_F,v_for_F,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_u,u_for_F,1,u_for_q,1.0,width,height,1); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_v,v_for_F,1,v_for_q,1.0,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u_for_q,u_for_F,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u_for_q,u_for_G,-1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v_for_q,v_for_F,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v_for_q,v_for_G,-1,width,height,1); } checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q) ); checkCudaErrors( cudaFree(v_for_q) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F = 0; v_for_F = 0; u_for_G = 0; v_for_G = 0; u_for_q = 0; v_for_q = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, 
const int nOuterFPIter, const int nSORIter, const int nPoissonIter) { float* u_for_F = u; float* v_for_F = v; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q = 0; float* v_for_q = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int it = 0;it < ADMMIter;it++) { ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_u,u_for_G,1,u_for_q,-1.0,width,height,1); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_v,v_for_G,1,v_for_q,-1.0,width,height,1); cu_Proximal_F1_Occupy(u_for_F,v_for_F,warpIm2,Im1,Im2,occupy,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_u,u_for_F,1,u_for_q,1.0,width,height,1); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_v,v_for_F,1,v_for_q,1.0,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u_for_q,u_for_F,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u_for_q,u_for_G,-1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v_for_q,v_for_F,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v_for_q,v_for_G,-1,width,height,1); } checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q) ); checkCudaErrors( cudaFree(v_for_q) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F = 0; v_for_F = 0; u_for_G = 0; v_for_G = 0; u_for_q = 0; v_for_q = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nPoissonIter) { float* u_for_F = u; float* v_for_F = v; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q = 0; float* v_for_q = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE); dim3 gridSize((width+blockSize.x-1)/blockSize.x,(height+blockSize.y-1)/blockSize.y); for(int it = 0;it < ADMMIter;it++) { ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_u,u_for_G,1,u_for_q,-1.0,width,height,1); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_v,v_for_G,1,v_for_q,-1.0,width,height,1); cu_Proximal_F1_DL1(u_for_F,v_for_F,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nInnerFPIter,nSORIter); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_u,u_for_F,1,u_for_q,1.0,width,height,1); ZQ_CUDA_ImageProcessing2D::Add_Im1_weight1_Im2_weight2_Kernel<<<gridSize,blockSize>>>(z_v,v_for_F,1,v_for_q,1.0,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u_for_q,u_for_F,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(u_for_q,u_for_G,-1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v_for_q,v_for_F,1,width,height,1); ZQ_CUDA_ImageProcessing2D::Addwith_Kernel<<<gridSize,blockSize>>>(v_for_q,v_for_G,-1,width,height,1); } checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q) ); checkCudaErrors( cudaFree(v_for_q) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F = 0; v_for_F = 0; u_for_G = 0; v_for_G = 0; u_for_q = 0; v_for_q = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_First(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_first(u_for_F2,v_for_F2,z_u,z_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( cudaFree(u_for_F2) ); checkCudaErrors( cudaFree(v_for_F2) ); checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q1) ); checkCudaErrors( cudaFree(v_for_q1) ); checkCudaErrors( cudaFree(u_for_q2) ); checkCudaErrors( cudaFree(v_for_q2) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_First_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_Occupy(u_for_F1,v_for_F1,warpIm2,Im1,Im2,occupy,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_first(u_for_F2,v_for_F2,z_u,z_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( cudaFree(u_for_F2) ); checkCudaErrors( cudaFree(v_for_F2) ); checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q1) ); checkCudaErrors( cudaFree(v_for_q1) ); checkCudaErrors( cudaFree(u_for_q2) ); checkCudaErrors( cudaFree(v_for_q2) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_DL1_First(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_DL1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nInnerFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_first(u_for_F2,v_for_F2,z_u,z_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( cudaFree(u_for_F2) ); checkCudaErrors( cudaFree(v_for_F2) ); checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q1) ); checkCudaErrors( cudaFree(v_for_q1) ); checkCudaErrors( cudaFree(u_for_q2) ); checkCudaErrors( cudaFree(v_for_q2) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Middle(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_middle(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( cudaFree(u_for_F2) ); checkCudaErrors( cudaFree(v_for_F2) ); checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q1) ); checkCudaErrors( cudaFree(v_for_q1) ); checkCudaErrors( cudaFree(u_for_q2) ); checkCudaErrors( cudaFree(v_for_q2) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Middle_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q1,sizeof(float)*width*height) 
); checkCudaErrors( cudaMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_Occupy(u_for_F1,v_for_F1,warpIm2,Im1,Im2,occupy,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_middle(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( cudaFree(u_for_F2) ); checkCudaErrors( cudaFree(v_for_F2) ); checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q1) ); checkCudaErrors( cudaFree(v_for_q1) ); checkCudaErrors( cudaFree(u_for_q2) ); checkCudaErrors( cudaFree(v_for_q2) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_DL1_Middle(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_DL1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nInnerFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_middle(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,next_u,next_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( cudaFree(u_for_F2) ); checkCudaErrors( cudaFree(v_for_F2) ); checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q1) ); checkCudaErrors( cudaFree(v_for_q1) ); checkCudaErrors( cudaFree(u_for_q2) ); checkCudaErrors( cudaFree(v_for_q2) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Last(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_last(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( cudaFree(u_for_F2) ); checkCudaErrors( cudaFree(v_for_F2) ); checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q1) ); checkCudaErrors( cudaFree(v_for_q1) ); checkCudaErrors( cudaFree(u_for_q2) ); checkCudaErrors( cudaFree(v_for_q2) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_Last_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( cudaMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_Occupy(u_for_F1,v_for_F1,warpIm2,Im1,Im2,occupy,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_last(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( cudaFree(u_for_F2) ); checkCudaErrors( cudaFree(v_for_F2) ); checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q1) ); checkCudaErrors( cudaFree(v_for_q1) ); checkCudaErrors( cudaFree(u_for_q2) ); checkCudaErrors( cudaFree(v_for_q2) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } void cu_OpticalFlow_ADMM_DL1_Last(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float* u_for_F1 = u; float* v_for_F1 = v; float* u_for_F2 = 0; float* v_for_F2 = 0; float* u_for_G = 0; float* v_for_G = 0; float* u_for_q1 = 0; float* v_for_q1 = 0; float* u_for_q2 = 0; float* v_for_q2 = 0; float* z_u = 0; float* z_v = 0; checkCudaErrors( 
cudaMalloc((void**)&u_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_F2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_G,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q1,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_for_q2,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_u,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&z_v,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(u_for_F2,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_F2,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(u_for_G,u_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemcpy(v_for_G,v_for_F1,sizeof(float)*width*height,cudaMemcpyDeviceToDevice) ); checkCudaErrors( cudaMemset(u_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q1,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(u_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(v_for_q2,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_u,0,sizeof(float)*width*height) ); checkCudaErrors( cudaMemset(z_v,0,sizeof(float)*width*height) ); float new_gamma = gamma*alpha*alpha; for(int it = 0;it < ADMMIter;it++) { cu_Compute_z_u_z_v_for_proximal_F1(z_u,z_v,u_for_G,v_for_G,u_for_q1,v_for_q1,width,height,1); cu_Proximal_F1_DL1(u_for_F1,v_for_F1,warpIm2,Im1,Im2,z_u,z_v,width,height,nChannels,alpha,beta,lambda,nOuterFPIter,nInnerFPIter,nSORIter); cu_Compute_z_u_z_v_for_proximal_F2(z_u,z_v,u_for_G,v_for_G,u_for_q2,v_for_q2,width,height,1); cu_Proximal_F2_last(u_for_F2,v_for_F2,z_u,z_v,pre_u,pre_v,width,height,new_gamma,lambda,nWarpFPIter,nPoissonIter); cu_Compute_z_u_z_v_for_proximal_G(z_u,z_v,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_q1,v_for_q1,u_for_q2,v_for_q2,width,height,1); cu_Proximal_G(u_for_G,v_for_G,z_u,z_v,width,height,nPoissonIter); cu_Update_u_v_for_q1_q2(u_for_q1,v_for_q1,u_for_q2,v_for_q2,u_for_F1,v_for_F1,u_for_F2,v_for_F2,u_for_G,v_for_G,width,height,1); } checkCudaErrors( cudaFree(u_for_F2) ); checkCudaErrors( cudaFree(v_for_F2) ); checkCudaErrors( cudaFree(u_for_G) ); checkCudaErrors( cudaFree(v_for_G) ); checkCudaErrors( cudaFree(u_for_q1) ); checkCudaErrors( cudaFree(v_for_q1) ); checkCudaErrors( cudaFree(u_for_q2) ); checkCudaErrors( cudaFree(v_for_q2) ); checkCudaErrors( cudaFree(z_u) ); checkCudaErrors( cudaFree(z_v) ); u_for_F1 = 0; v_for_F1 = 0; u_for_F2 = 0; v_for_F2 = 0; u_for_G = 0; v_for_G = 0; u_for_q1 = 0; v_for_q1 = 0; u_for_q2 = 0; v_for_q2 = 0; z_u = 0; z_v = 0; } /***********************************************************************/ extern "C" void InitDevice2D(const int deviceid) { int num_devices = 0; checkCudaErrors(cudaGetDeviceCount(&num_devices)); int cur_device = deviceid; if(deviceid < 0 || deviceid >= num_devices) { cur_device = 0; cudaDeviceProp properties; cudaGetDeviceProperties(&properties, cur_device); printf("use the Device ID:\t%d\n", cur_device); printf("Device Name is used:\t%s\n", properties.name ); } checkCudaErrors(cudaSetDevice(cur_device)); } extern "C" float OpticalFlow2D_L2(float* u, float* v, float* warpIm2, const float* Im1, const float* 
Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nSORIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); cu_OpticalFlow_L2(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,nOuterFPIter,nSORIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_L2_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nSORIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(occupy_d,occupy,sizeof(float)*width*height,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); 
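// At this point every host input has been mirrored into device memory; the solver call
// below runs entirely on the GPU and only u, v and warpIm2 are copied back afterwards.
// `occupy` is a single-channel width*height float buffer (presumably a per-pixel
// occupancy/weight map) that is passed through to the device solver as a read-only input.
// Illustrative host-side usage of the plain L2 wrapper (a sketch: the buffer names and
// their initialisation are hypothetical, only the exported signature is taken from this file):
//   float *u = ..., *v = ...;                 // width*height flow buffers
//   float *I1 = ..., *I2 = ..., *warp = ...;  // width*height*nChannels images; warp receives the warped second image
//   float ms = OpticalFlow2D_L2(u, v, warp, I1, I2, width, height, nChannels,
//                               alpha, beta, nOuterFPIter, nSORIter);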
cu_OpticalFlow_L2_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,width,height,nChannels,alpha,beta,nOuterFPIter,nSORIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(occupy_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_L1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nInnerFPIter,const int nSORIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); cu_OpticalFlow_L1(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,nOuterFPIter,nInnerFPIter,nSORIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels,cudaMemcpyHostToDevice) ); cu_OpticalFlow_DL1(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,nOuterFPIter,nInnerFPIter,nSORIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels,cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,lambda,ADMMIter,nOuterFPIter,nSORIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; 
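// Timing note: `start` was recorded before the device allocations at the top of this
// wrapper and `stop` is recorded just below, after the buffers have been freed, so the
// milliseconds returned here (and by the other OpticalFlow2D_* wrappers in this file)
// include allocation, host<->device transfers and frees, not only the ADMM solve itself.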
Im2_d = 0; warpIm2_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(occupy_d,occupy,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,width,height,nChannels,alpha,beta,lambda,ADMMIter,nOuterFPIter,nSORIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(occupy_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; occupy_d = 0; warpIm2_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_DL1(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const int width, const int height, const int nChannels, const float alpha, const float beta, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* u_d = 0; float* v_d = 0; float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( 
cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_DL1(u_d,v_d,warpIm2_d,Im1_d,Im2_d,width,height,nChannels,alpha,beta,lambda,ADMMIter,nOuterFPIter,nInnerFPIter,nSORIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_First(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_u_d,next_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_v_d,next_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_First(u_d,v_d,warpIm2_d,Im1_d,Im2_d,next_u_d,next_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); 
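// The "First" variant couples the flow being estimated to the following frame's flow
// (next_u/next_v) through the proximal term handled on the device by cu_Proximal_F2_first;
// cu_OpticalFlow_ADMM_First scales gamma by alpha*alpha before passing it on, presumably
// acting as a temporal-smoothness prior across consecutive flow fields. Only u, v and
// warpIm2 are copied back to the host; next_u/next_v are read-only inputs.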
checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(next_u_d) ); checkCudaErrors( cudaFree(next_v_d) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_First_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(occupy_d,occupy,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_u_d,next_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_v_d,next_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_First_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,next_u_d,next_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(next_u_d) ); checkCudaErrors( cudaFree(next_v_d) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(occupy_d) ); 
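// The *_Occupy wrappers differ from their plain counterparts only by the extra
// width*height `occupy` buffer allocated and copied above, which is forwarded to the
// occupancy-aware device solvers (cu_Proximal_F1_Occupy inside the ADMM variants).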
checkCudaErrors( cudaFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; occupy_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_DL1_First(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_u_d,next_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_v_d,next_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_DL1_First(u_d,v_d,warpIm2_d,Im1_d,Im2_d,next_u_d,next_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nInnerFPIter, nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(next_u_d) ); checkCudaErrors( cudaFree(next_v_d) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Middle(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, 
const int nWarpFPIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_u_d,next_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_v_d,next_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Middle(u_d,v_d,warpIm2_d,Im1_d,Im2_d,pre_u_d,pre_v_d,next_u_d,next_v_d,width,height,nChannels, alpha,beta,gamma,lambda,ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(next_u_d) ); checkCudaErrors( cudaFree(next_v_d) ); checkCudaErrors( cudaFree(pre_u_d) ); checkCudaErrors( cudaFree(pre_v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); Im1_d = 0; Im2_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; pre_u_d = 0; pre_v_d = 0; u_d = 0; v_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Middle_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); 
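// Note: the two timing events are created on every call but never released, so each
// wrapper call leaks two event handles. A minimal cleanup, applicable to every timing
// wrapper in this file, would be to add before each `return time;`:
//   cudaEventDestroy(start);
//   cudaEventDestroy(stop);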
cudaEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(occupy_d,occupy,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_u_d,next_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_v_d,next_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Middle_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,pre_u_d,pre_v_d,next_u_d,next_v_d,width,height,nChannels, alpha,beta,gamma,lambda,ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(next_u_d) ); checkCudaErrors( cudaFree(next_v_d) ); checkCudaErrors( cudaFree(pre_u_d) ); checkCudaErrors( cudaFree(pre_v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(occupy_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); Im1_d = 0; Im2_d = 0; occupy_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; pre_u_d = 0; pre_v_d = 0; u_d = 0; v_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_DL1_Middle(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const float* next_u, const float* next_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int 
nWarpFPIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* next_u_d = 0; float* next_v_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&next_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&next_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_u_d,next_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(next_v_d,next_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_DL1_Middle(u_d,v_d,warpIm2_d,Im1_d,Im2_d,pre_u_d,pre_v_d,next_u_d,next_v_d,width,height,nChannels, alpha,beta,gamma,lambda,ADMMIter,nOuterFPIter,nInnerFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(next_u_d) ); checkCudaErrors( cudaFree(next_v_d) ); checkCudaErrors( cudaFree(pre_u_d) ); checkCudaErrors( cudaFree(pre_v_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); Im1_d = 0; Im2_d = 0; warpIm2_d = 0; next_u_d = 0; next_v_d = 0; pre_u_d = 0; pre_v_d = 0; u_d = 0; v_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Last(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; 
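// "First"/"Middle"/"Last" presumably refer to a frame's position in the sequence:
// First couples the flow only to the next frame's flow, Middle to both the previous
// (pre_u/pre_v) and the next (next_u/next_v) flow, and Last (this wrapper) only to the
// previous flow, via cu_Proximal_F2_first / cu_Proximal_F2_middle / cu_Proximal_F2_last
// respectively; apart from that the three ADMM paths are identical.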
float* warpIm2_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Last(u_d,v_d,warpIm2_d,Im1_d,Im2_d,pre_u_d,pre_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(pre_u_d) ); checkCudaErrors( cudaFree(pre_v_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; pre_u_d = 0; pre_v_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_Last_Occupy(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* occupy, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* occupy_d = 0; float* warpIm2_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&occupy_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( 
cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(occupy_d,occupy,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_Last_Occupy(u_d,v_d,warpIm2_d,Im1_d,Im2_d,occupy_d,pre_u_d,pre_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nSORIter,nWarpFPIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(occupy_d) ); checkCudaErrors( cudaFree(pre_u_d) ); checkCudaErrors( cudaFree(pre_v_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; occupy_d = 0; warpIm2_d = 0; pre_u_d = 0; pre_v_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } extern "C" float OpticalFlow2D_ADMM_DL1_Last(float* u, float* v, float* warpIm2, const float* Im1, const float* Im2, const float* pre_u, const float* pre_v, const int width, const int height, const int nChannels, const float alpha, const float beta, const float gamma, const float lambda, const int ADMMIter, const int nOuterFPIter, const int nInnerFPIter, const int nSORIter, const int nWarpFPIter, const int nPoissonIter) { float time = 0; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); float* Im1_d = 0; float* Im2_d = 0; float* warpIm2_d = 0; float* pre_u_d = 0; float* pre_v_d = 0; float* u_d = 0; float* v_d = 0; checkCudaErrors( cudaMalloc((void**)&Im1_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&Im2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&warpIm2_d,sizeof(float)*width*height*nChannels) ); checkCudaErrors( cudaMalloc((void**)&pre_u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&pre_v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&u_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMalloc((void**)&v_d,sizeof(float)*width*height) ); checkCudaErrors( cudaMemcpy(Im1_d,Im1,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(Im2_d,Im2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(warpIm2_d,warpIm2,sizeof(float)*width*height*nChannels, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(pre_u_d,pre_u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( 
cudaMemcpy(pre_v_d,pre_v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(u_d,u,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(v_d,v,sizeof(float)*width*height, cudaMemcpyHostToDevice) ); cu_OpticalFlow_ADMM_DL1_Last(u_d,v_d,warpIm2_d,Im1_d,Im2_d,pre_u_d,pre_v_d,width,height,nChannels,alpha,beta,gamma,lambda, ADMMIter,nOuterFPIter,nSORIter,nInnerFPIter,nWarpFPIter,nPoissonIter); checkCudaErrors( cudaMemcpy(u,u_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(v,v_d,sizeof(float)*width*height, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy(warpIm2,warpIm2_d,sizeof(float)*width*height*nChannels, cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(u_d) ); checkCudaErrors( cudaFree(v_d) ); checkCudaErrors( cudaFree(warpIm2_d) ); checkCudaErrors( cudaFree(Im1_d) ); checkCudaErrors( cudaFree(Im2_d) ); checkCudaErrors( cudaFree(pre_u_d) ); checkCudaErrors( cudaFree(pre_v_d) ); u_d = 0; v_d = 0; Im1_d = 0; Im2_d = 0; warpIm2_d = 0; pre_u_d = 0; pre_v_d = 0; cudaEventRecord(stop,0); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start,stop); return time; } } #endif
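The host wrappers in the file above all follow the same template: create CUDA events, record the start event, allocate device buffers, copy inputs host-to-device, call the solver, copy results device-to-host, free the buffers, and return the elapsed milliseconds from cudaEventElapsedTime. A minimal standalone sketch of that pattern follows; the kernel and function names are illustrative only (not taken from the files in this dump), and the checkCudaErrors wrapper used above (presumably from the CUDA samples' helper_cuda.h) is omitted for brevity.

#include <cuda_runtime.h>
#include <cstdio>

// Trivial stand-in kernel; the real wrappers call cu_OpticalFlow_ADMM_* solvers instead.
__global__ void scaleKernel(float* data, int n, float s) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

// Returns elapsed GPU time in milliseconds, mirroring the wrappers above:
// event timing brackets the alloc / copy / launch / copy-back / free sequence.
float runTimedScale(float* host, int n, float s) {
    float time = 0.0f;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    float* dev = 0;
    cudaMalloc((void**)&dev, sizeof(float) * n);
    cudaMemcpy(dev, host, sizeof(float) * n, cudaMemcpyHostToDevice);

    scaleKernel<<<(n + 255) / 256, 256>>>(dev, n, s);

    cudaMemcpy(host, dev, sizeof(float) * n, cudaMemcpyDeviceToHost);
    cudaFree(dev);
    dev = 0;

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(start);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return time;
}

int main() {
    const int n = 1024;
    float host[n];
    for (int i = 0; i < n; ++i) host[i] = 1.0f;
    printf("elapsed: %f ms\n", runTimedScale(host, n, 2.0f));
    return 0;
}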
855a83fda758ed36a200dd57a0a187a70a355140.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <THH/THHAtomics.cuh> #include <cmath> using namespace at; #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 // 32 * 32 #define WARP_SIZE 32 #define THREADS_PER_PIXEL 32 #define MAX_SHARED_MEMORY 49152 #define MAX_SHARED_SCALAR_T 6144 // 49152 / 8 = 6144 #define MAXIMIZE_KERNEL_SIZE true #define kTileDim 32 #define kBlockRows 8 #define FULL_MASK 0xffffffff inline int divideUP(const int x, const int y) { return (((x) + (y)-1) / (y)); } __device__ inline int Loc2Index(const int n, const int c, const int h, const int w, const int channel_num, const int height, const int width) { int index = w + (h + (c + n * channel_num) * height) * width; return index; } /* TODO: move this to a common place */ template <typename scalar_t> __device__ inline scalar_t min(scalar_t a, scalar_t b) { return a < b ? a : b; } template <typename scalar_t> __device__ inline scalar_t max(scalar_t a, scalar_t b) { return a > b ? a : b; } template <typename scalar_t> __device__ __forceinline__ scalar_t warpReduceSum(scalar_t val) { for (int offset = 16; offset > 0; offset /= 2) val += __shfl_down_sync(FULL_MASK, val, offset); return val; } // Splits the original matrix into submatrices with size 32 * 32. // Each block transposes one submatrix by loading it into shared memory. // Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ template <typename scalar_t> __global__ void BatchTranspose2DCUDAKernel(const int N, const int H, const int W, const int dh, const int dw, const scalar_t *__restrict__ X, scalar_t *__restrict__ Y) { __shared__ scalar_t tile[kTileDim][kTileDim + 1]; const int n = blockIdx.x / (dh * dw); const int k = blockIdx.x % (dh * dw); const int r = k / dw; const int c = k % dw; const int offset = n * H * W; int x = c * kTileDim + threadIdx.x; int y = r * kTileDim + threadIdx.y; if (x < W) { for (int i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) { tile[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x]; } } __syncthreads(); x = r * kTileDim + threadIdx.x; y = c * kTileDim + threadIdx.y; if (x < H) { for (int i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) { Y[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i]; } } } template <typename scalar_t> __global__ void CARAFEForward( const int num_kernels, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_masks, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ top_data) { #if MAXIMIZE_KERNEL_SIZE __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; #else __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; #endif int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int down_pw = pw / scale_factor; const int down_ph = ph / scale_factor; const int start_w = 
down_pw - (kernel_size - 1) / 2; const int end_w = down_pw + (kernel_size - 1) / 2 + 1; const int start_h = down_ph - (kernel_size - 1) / 2; const int end_h = down_ph + (kernel_size - 1) / 2 + 1; for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { int mask_index = Loc2Index(n, ph, pw, c, height, width, mask_channels); shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; } __syncthreads(); const int channels_per_group = ceilf(channels / (float)group_size); #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { int mask_group = c / channels_per_group; scalar_t output_val = 0; #pragma unroll for (int iy = start_h; iy < end_h; iy++) { #pragma unroll for (int ix = start_w; ix < end_w; ix++) { if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) { continue; } int mask_iy = iy - down_ph + (kernel_size - 1) / 2; int mask_ix = ix - down_pw + (kernel_size - 1) / 2; int mask_c = (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; int feat_index = Loc2Index(n, iy, ix, c, down_height, down_width, channels); output_val += bottom_data[feat_index] * shared_mask[mask_c * WARP_SIZE + pixel_id]; } } int top_index = Loc2Index(n, ph, pw, c, height, width, channels); top_data[top_index] = output_val; } } int CARAFEForwardLaucher(const at::Tensor features, const at::Tensor masks, const int kernel_size, const int group_size, const int scale_factor, const int batch_size, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int mask_channels, at::Tensor rfeatures, at::Tensor routput, at::Tensor rmasks, at::Tensor output) { // one warp per pixel hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "NCHW2NHWC_Feature", ([&] { const scalar_t *bottom_data = features.data<scalar_t>(); scalar_t *top_data = rfeatures.data<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(input_height * input_width, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, channels, input_height * input_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "NCHW2NHWC_Masks", ([&] { const scalar_t *bottom_data = masks.data<scalar_t>(); scalar_t *top_data = rmasks.data<scalar_t>(); const int dh = divideUP(mask_channels, kTileDim); const int dw = divideUP(output_height * output_width, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, mask_channels, output_height * output_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "CARAFELaucherForward", ([&] { const int num_kernels = batch_size * output_height * output_width * THREADS_PER_PIXEL; const scalar_t *bottom_data = rfeatures.data<scalar_t>(); const scalar_t *bottom_masks = rmasks.data<scalar_t>(); scalar_t *top_data = routput.data<scalar_t>(); hipLaunchKernelGGL(( CARAFEForward<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, bottom_data, bottom_masks, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "NHWC2NCHW", ([&] { const scalar_t 
*bottom_data = routput.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); const int dh = divideUP(output_height * output_width, kTileDim); const int dw = divideUP(channels, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, output_height * output_width, channels, dh, dw, bottom_data, top_data); })); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void CARAFEBackward_Feature( const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_masks, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ bottom_diff) { #if MAXIMIZE_KERNEL_SIZE __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; #else __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; #endif int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; // (n, c, ph, pw) is an element in the bottom_data index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int start_w = pw - (kernel_size - 1) * scale_factor / 2; const int end_w = pw + (kernel_size - 1) * scale_factor / 2 + 1; const int start_h = ph - (kernel_size - 1) * scale_factor / 2; const int end_h = ph + (kernel_size - 1) * scale_factor / 2 + 1; for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { const int mask_w = (c % kernel_size) * scale_factor; const int mask_h = (c / kernel_size % kernel_size) * scale_factor; const int mask_x = start_w + mask_w; const int mask_y = start_h + mask_h; if (mask_y < 0 || mask_y > height - 1 || mask_x < 0 || mask_x > width - 1) { shared_mask[c * WARP_SIZE + pixel_id] = 0; continue; } const int mask_group = c / (kernel_size * kernel_size); const int mask_c = (2 * mask_group + 1) * kernel_size * kernel_size - c - 1; int mask_index = Loc2Index(n, mask_c, mask_y, mask_x, mask_channels, height, width); shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; } __syncthreads(); const int channels_per_group = ceilf(channels / (float)group_size); #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { int mask_group = c / channels_per_group; int top_index = Loc2Index(n, ph, pw, c, height, width, channels); scalar_t output_val = 0; #pragma unroll for (int iy = start_h; iy < end_h; iy += scale_factor) { #pragma unroll for (int ix = start_w; ix < end_w; ix += scale_factor) { if (iy < 0 || iy > height - 1 || ix < 0 || ix > width - 1) { continue; } int mask_iy = (iy - ph + (kernel_size - 1) * scale_factor / 2) / scale_factor; int mask_ix = (ix - pw + (kernel_size - 1) * scale_factor / 2) / scale_factor; int mask_c = (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; int feat_index = Loc2Index(n, iy, ix, c, height, width, channels); output_val += shared_mask[mask_c * WARP_SIZE + pixel_id] * top_diff[feat_index]; } } bottom_diff[top_index] = output_val; } } template <typename scalar_t> __global__ void FeatureSum(const int num_kernels, const scalar_t *__restrict__ input_data, const int scale_factor, const 
int channels, const int height, const int width, scalar_t *__restrict__ output_data) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int split_id = threadIdx.x % THREADS_PER_PIXEL; index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { scalar_t output_val = 0; for (int iy = ph * scale_factor; iy < (ph + 1) * scale_factor; iy++) { for (int ix = pw * scale_factor; ix < (pw + 1) * scale_factor; ix++) { int input_id = Loc2Index(n, iy, ix, c, height * scale_factor, width * scale_factor, channels); output_val += input_data[input_id]; } } const int output_id = Loc2Index(n, ph, pw, c, height, width, channels); output_data[output_id] = output_val; } } template <typename scalar_t> __global__ void CARAFEBackward_Mask(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_data, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ mask_diff) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int lane_id = index % WARP_SIZE; index = index / WARP_SIZE; const int mask_c = index % mask_channels; // (n, c, ph, pw) is an element in the bottom_data index = index / mask_channels; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int down_pw = pw / scale_factor; const int down_ph = ph / scale_factor; const int mask_group = mask_c / (kernel_size * kernel_size); const int mask_loc = mask_c % (kernel_size * kernel_size); const int offset_x = mask_loc % kernel_size - (kernel_size - 1) / 2; const int offset_y = mask_loc / kernel_size % kernel_size - (kernel_size - 1) / 2; const int down_x = down_pw + offset_x; const int down_y = down_ph + offset_y; scalar_t output_val = 0; if (down_y >= 0 && down_y <= down_height - 1 && down_x >= 0 && down_x <= down_width - 1) { const int channels_per_mask = ceilf(channels / (float)group_size); const int start = channels_per_mask * mask_group; const int end = min(channels_per_mask * (mask_group + 1), channels); for (int c = start + lane_id; c < end; c += WARP_SIZE) { int bottom_id = Loc2Index(n, down_y, down_x, c, down_height, down_width, channels); int top_id = Loc2Index(n, ph, pw, c, height, width, channels); output_val += top_diff[top_id] * bottom_data[bottom_id]; } } __syncwarp(); output_val = warpReduceSum(output_val); if (lane_id == 0) { const int mask_id = Loc2Index(n, ph, pw, mask_c, height, width, mask_channels); mask_diff[mask_id] = output_val; } } int CARAFEBackwardLaucher(const at::Tensor top_grad, const at::Tensor rfeatures, const at::Tensor masks, const int kernel_size, const int group_size, const int scale_factor, const int batch_size, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int mask_channels, at::Tensor rtop_grad, at::Tensor rbottom_grad_hs, at::Tensor rbottom_grad, at::Tensor rmask_grad, at::Tensor bottom_grad, at::Tensor mask_grad) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NCHW2NHWC_Top_Grad", ([&] { const scalar_t *bottom_data = top_grad.data<scalar_t>(); scalar_t *top_data = 
rtop_grad.data<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(output_height * output_width, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, channels, output_height * output_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "CARAFELaucherBackward_Feature", ([&] { const int num_kernels = batch_size * output_height * output_width * THREADS_PER_PIXEL; const scalar_t *top_diff = rtop_grad.data<scalar_t>(); const scalar_t *bottom_masks = masks.data<scalar_t>(); scalar_t *bottom_diff = rbottom_grad_hs.data<scalar_t>(); hipLaunchKernelGGL(( CARAFEBackward_Feature<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, top_diff, bottom_masks, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "FeatureSum", ([&] { const int num_kernels = batch_size * input_height * input_width * THREADS_PER_PIXEL; const scalar_t *bottom_diff_hs = rbottom_grad_hs.data<scalar_t>(); scalar_t *bottom_diff = rbottom_grad.data<scalar_t>(); hipLaunchKernelGGL(( FeatureSum<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, bottom_diff_hs, scale_factor, channels, input_height, input_width, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NHWC2NCHW_Bottom_Grad", ([&] { const scalar_t *bottom_data = rbottom_grad.data<scalar_t>(); scalar_t *top_data = bottom_grad.data<scalar_t>(); const int dh = divideUP(input_height * input_width, kTileDim); const int dw = divideUP(channels, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, input_height * input_width, channels, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES( top_grad.type(), "CARAFELaucherBackward_Mask", ([&] { const int num_kernels = batch_size * output_height * output_width * mask_channels * WARP_SIZE; const scalar_t *top_diff = rtop_grad.data<scalar_t>(); const scalar_t *bottom_data = rfeatures.data<scalar_t>(); scalar_t *mask_diff = rmask_grad.data<scalar_t>(); hipLaunchKernelGGL(( CARAFEBackward_Mask<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, top_diff, bottom_data, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, mask_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NHWC2NCHW_Mask_Grad", ([&] { const scalar_t *bottom_data = rmask_grad.data<scalar_t>(); scalar_t *top_data = mask_grad.data<scalar_t>(); const int dh = divideUP(output_height * output_width, kTileDim); const int dw = divideUP(mask_channels, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, output_height * output_width, mask_channels, dh, dw, bottom_data, top_data); })); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; }
855a83fda758ed36a200dd57a0a187a70a355140.cu
#include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <THC/THCAtomics.cuh> #include <cmath> using namespace at; #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 // 32 * 32 #define WARP_SIZE 32 #define THREADS_PER_PIXEL 32 #define MAX_SHARED_MEMORY 49152 #define MAX_SHARED_SCALAR_T 6144 // 49152 / 8 = 6144 #define MAXIMIZE_KERNEL_SIZE true #define kTileDim 32 #define kBlockRows 8 #define FULL_MASK 0xffffffff inline int divideUP(const int x, const int y) { return (((x) + (y)-1) / (y)); } __device__ inline int Loc2Index(const int n, const int c, const int h, const int w, const int channel_num, const int height, const int width) { int index = w + (h + (c + n * channel_num) * height) * width; return index; } /* TODO: move this to a common place */ template <typename scalar_t> __device__ inline scalar_t min(scalar_t a, scalar_t b) { return a < b ? a : b; } template <typename scalar_t> __device__ inline scalar_t max(scalar_t a, scalar_t b) { return a > b ? a : b; } template <typename scalar_t> __device__ __forceinline__ scalar_t warpReduceSum(scalar_t val) { for (int offset = 16; offset > 0; offset /= 2) val += __shfl_down_sync(FULL_MASK, val, offset); return val; } // Splits the original matrix into submatrices with size 32 * 32. // Each block transposes one submatrix by loading it into shared memory. // Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ template <typename scalar_t> __global__ void BatchTranspose2DCUDAKernel(const int N, const int H, const int W, const int dh, const int dw, const scalar_t *__restrict__ X, scalar_t *__restrict__ Y) { __shared__ scalar_t tile[kTileDim][kTileDim + 1]; const int n = blockIdx.x / (dh * dw); const int k = blockIdx.x % (dh * dw); const int r = k / dw; const int c = k % dw; const int offset = n * H * W; int x = c * kTileDim + threadIdx.x; int y = r * kTileDim + threadIdx.y; if (x < W) { for (int i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) { tile[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x]; } } __syncthreads(); x = r * kTileDim + threadIdx.x; y = c * kTileDim + threadIdx.y; if (x < H) { for (int i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) { Y[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i]; } } } template <typename scalar_t> __global__ void CARAFEForward( const int num_kernels, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_masks, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ top_data) { #if MAXIMIZE_KERNEL_SIZE __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; #else __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; #endif int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int down_pw = pw / scale_factor; const int down_ph = ph / scale_factor; const int start_w = down_pw - (kernel_size - 1) / 2; const int end_w = down_pw + (kernel_size - 1) / 2 + 1; 
const int start_h = down_ph - (kernel_size - 1) / 2; const int end_h = down_ph + (kernel_size - 1) / 2 + 1; for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { int mask_index = Loc2Index(n, ph, pw, c, height, width, mask_channels); shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; } __syncthreads(); const int channels_per_group = ceilf(channels / (float)group_size); #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { int mask_group = c / channels_per_group; scalar_t output_val = 0; #pragma unroll for (int iy = start_h; iy < end_h; iy++) { #pragma unroll for (int ix = start_w; ix < end_w; ix++) { if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) { continue; } int mask_iy = iy - down_ph + (kernel_size - 1) / 2; int mask_ix = ix - down_pw + (kernel_size - 1) / 2; int mask_c = (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; int feat_index = Loc2Index(n, iy, ix, c, down_height, down_width, channels); output_val += bottom_data[feat_index] * shared_mask[mask_c * WARP_SIZE + pixel_id]; } } int top_index = Loc2Index(n, ph, pw, c, height, width, channels); top_data[top_index] = output_val; } } int CARAFEForwardLaucher(const at::Tensor features, const at::Tensor masks, const int kernel_size, const int group_size, const int scale_factor, const int batch_size, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int mask_channels, at::Tensor rfeatures, at::Tensor routput, at::Tensor rmasks, at::Tensor output) { // one warp per pixel cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "NCHW2NHWC_Feature", ([&] { const scalar_t *bottom_data = features.data<scalar_t>(); scalar_t *top_data = rfeatures.data<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(input_height * input_width, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, channels, input_height * input_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "NCHW2NHWC_Masks", ([&] { const scalar_t *bottom_data = masks.data<scalar_t>(); scalar_t *top_data = rmasks.data<scalar_t>(); const int dh = divideUP(mask_channels, kTileDim); const int dw = divideUP(output_height * output_width, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, mask_channels, output_height * output_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "CARAFELaucherForward", ([&] { const int num_kernels = batch_size * output_height * output_width * THREADS_PER_PIXEL; const scalar_t *bottom_data = rfeatures.data<scalar_t>(); const scalar_t *bottom_masks = rmasks.data<scalar_t>(); scalar_t *top_data = routput.data<scalar_t>(); CARAFEForward<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, bottom_data, bottom_masks, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "NHWC2NCHW", ([&] { const scalar_t *bottom_data = routput.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); const int dh = divideUP(output_height * output_width, kTileDim); const int dw = divideUP(channels, kTileDim); 
BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, output_height * output_width, channels, dh, dw, bottom_data, top_data); })); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void CARAFEBackward_Feature( const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_masks, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ bottom_diff) { #if MAXIMIZE_KERNEL_SIZE __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; #else __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; #endif int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; // (n, c, ph, pw) is an element in the bottom_data index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int start_w = pw - (kernel_size - 1) * scale_factor / 2; const int end_w = pw + (kernel_size - 1) * scale_factor / 2 + 1; const int start_h = ph - (kernel_size - 1) * scale_factor / 2; const int end_h = ph + (kernel_size - 1) * scale_factor / 2 + 1; for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { const int mask_w = (c % kernel_size) * scale_factor; const int mask_h = (c / kernel_size % kernel_size) * scale_factor; const int mask_x = start_w + mask_w; const int mask_y = start_h + mask_h; if (mask_y < 0 || mask_y > height - 1 || mask_x < 0 || mask_x > width - 1) { shared_mask[c * WARP_SIZE + pixel_id] = 0; continue; } const int mask_group = c / (kernel_size * kernel_size); const int mask_c = (2 * mask_group + 1) * kernel_size * kernel_size - c - 1; int mask_index = Loc2Index(n, mask_c, mask_y, mask_x, mask_channels, height, width); shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; } __syncthreads(); const int channels_per_group = ceilf(channels / (float)group_size); #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { int mask_group = c / channels_per_group; int top_index = Loc2Index(n, ph, pw, c, height, width, channels); scalar_t output_val = 0; #pragma unroll for (int iy = start_h; iy < end_h; iy += scale_factor) { #pragma unroll for (int ix = start_w; ix < end_w; ix += scale_factor) { if (iy < 0 || iy > height - 1 || ix < 0 || ix > width - 1) { continue; } int mask_iy = (iy - ph + (kernel_size - 1) * scale_factor / 2) / scale_factor; int mask_ix = (ix - pw + (kernel_size - 1) * scale_factor / 2) / scale_factor; int mask_c = (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; int feat_index = Loc2Index(n, iy, ix, c, height, width, channels); output_val += shared_mask[mask_c * WARP_SIZE + pixel_id] * top_diff[feat_index]; } } bottom_diff[top_index] = output_val; } } template <typename scalar_t> __global__ void FeatureSum(const int num_kernels, const scalar_t *__restrict__ input_data, const int scale_factor, const int channels, const int height, const int width, scalar_t *__restrict__ output_data) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int split_id = threadIdx.x % 
THREADS_PER_PIXEL; index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { scalar_t output_val = 0; for (int iy = ph * scale_factor; iy < (ph + 1) * scale_factor; iy++) { for (int ix = pw * scale_factor; ix < (pw + 1) * scale_factor; ix++) { int input_id = Loc2Index(n, iy, ix, c, height * scale_factor, width * scale_factor, channels); output_val += input_data[input_id]; } } const int output_id = Loc2Index(n, ph, pw, c, height, width, channels); output_data[output_id] = output_val; } } template <typename scalar_t> __global__ void CARAFEBackward_Mask(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_data, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ mask_diff) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int lane_id = index % WARP_SIZE; index = index / WARP_SIZE; const int mask_c = index % mask_channels; // (n, c, ph, pw) is an element in the bottom_data index = index / mask_channels; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int down_pw = pw / scale_factor; const int down_ph = ph / scale_factor; const int mask_group = mask_c / (kernel_size * kernel_size); const int mask_loc = mask_c % (kernel_size * kernel_size); const int offset_x = mask_loc % kernel_size - (kernel_size - 1) / 2; const int offset_y = mask_loc / kernel_size % kernel_size - (kernel_size - 1) / 2; const int down_x = down_pw + offset_x; const int down_y = down_ph + offset_y; scalar_t output_val = 0; if (down_y >= 0 && down_y <= down_height - 1 && down_x >= 0 && down_x <= down_width - 1) { const int channels_per_mask = ceilf(channels / (float)group_size); const int start = channels_per_mask * mask_group; const int end = min(channels_per_mask * (mask_group + 1), channels); for (int c = start + lane_id; c < end; c += WARP_SIZE) { int bottom_id = Loc2Index(n, down_y, down_x, c, down_height, down_width, channels); int top_id = Loc2Index(n, ph, pw, c, height, width, channels); output_val += top_diff[top_id] * bottom_data[bottom_id]; } } __syncwarp(); output_val = warpReduceSum(output_val); if (lane_id == 0) { const int mask_id = Loc2Index(n, ph, pw, mask_c, height, width, mask_channels); mask_diff[mask_id] = output_val; } } int CARAFEBackwardLaucher(const at::Tensor top_grad, const at::Tensor rfeatures, const at::Tensor masks, const int kernel_size, const int group_size, const int scale_factor, const int batch_size, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int mask_channels, at::Tensor rtop_grad, at::Tensor rbottom_grad_hs, at::Tensor rbottom_grad, at::Tensor rmask_grad, at::Tensor bottom_grad, at::Tensor mask_grad) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NCHW2NHWC_Top_Grad", ([&] { const scalar_t *bottom_data = top_grad.data<scalar_t>(); scalar_t *top_data = rtop_grad.data<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(output_height * output_width, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, 
stream>>>( batch_size, channels, output_height * output_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "CARAFELaucherBackward_Feature", ([&] { const int num_kernels = batch_size * output_height * output_width * THREADS_PER_PIXEL; const scalar_t *top_diff = rtop_grad.data<scalar_t>(); const scalar_t *bottom_masks = masks.data<scalar_t>(); scalar_t *bottom_diff = rbottom_grad_hs.data<scalar_t>(); CARAFEBackward_Feature<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, top_diff, bottom_masks, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "FeatureSum", ([&] { const int num_kernels = batch_size * input_height * input_width * THREADS_PER_PIXEL; const scalar_t *bottom_diff_hs = rbottom_grad_hs.data<scalar_t>(); scalar_t *bottom_diff = rbottom_grad.data<scalar_t>(); FeatureSum<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, bottom_diff_hs, scale_factor, channels, input_height, input_width, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NHWC2NCHW_Bottom_Grad", ([&] { const scalar_t *bottom_data = rbottom_grad.data<scalar_t>(); scalar_t *top_data = bottom_grad.data<scalar_t>(); const int dh = divideUP(input_height * input_width, kTileDim); const int dw = divideUP(channels, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, input_height * input_width, channels, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES( top_grad.type(), "CARAFELaucherBackward_Mask", ([&] { const int num_kernels = batch_size * output_height * output_width * mask_channels * WARP_SIZE; const scalar_t *top_diff = rtop_grad.data<scalar_t>(); const scalar_t *bottom_data = rfeatures.data<scalar_t>(); scalar_t *mask_diff = rmask_grad.data<scalar_t>(); CARAFEBackward_Mask<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, top_diff, bottom_data, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, mask_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "NHWC2NCHW_Mask_Grad", ([&] { const scalar_t *bottom_data = rmask_grad.data<scalar_t>(); scalar_t *top_data = mask_grad.data<scalar_t>(); const int dh = divideUP(output_height * output_width, kTileDim); const int dw = divideUP(mask_channels, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, output_height * output_width, mask_channels, dh, dw, bottom_data, top_data); })); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; }
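Both CARAFE variants in this record accumulate per-channel partial sums across a warp with warpReduceSum, which halves the shuffle offset each step via __shfl_down_sync so that lane 0 ends up holding the warp-wide sum. A small self-contained sketch of that reduction in isolation follows; the kernel and buffer names are illustrative, not taken from the files above.

#include <cuda_runtime.h>
#include <cstdio>

#define FULL_MASK 0xffffffff

// Same butterfly pattern as warpReduceSum above: after the loop,
// lane 0 of each warp holds the sum of all 32 lane values.
__device__ float warpSum(float val) {
    for (int offset = 16; offset > 0; offset /= 2)
        val += __shfl_down_sync(FULL_MASK, val, offset);
    return val;
}

__global__ void sumPerWarp(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float v = (i < n) ? in[i] : 0.0f;
    v = warpSum(v);
    if ((threadIdx.x & 31) == 0 && i < n)   // one write per warp (lane 0)
        out[i / 32] = v;
}

int main() {
    const int n = 64;                       // two warps' worth of data
    float h_in[n], h_out[2];
    for (int i = 0; i < n; ++i) h_in[i] = 1.0f;
    float *d_in = 0, *d_out = 0;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, 2 * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    sumPerWarp<<<1, n>>>(d_in, d_out, n);
    cudaMemcpy(h_out, d_out, 2 * sizeof(float), cudaMemcpyDeviceToHost);
    printf("warp sums: %f %f\n", h_out[0], h_out[1]);  // expect 32 and 32
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}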
f8abc140c7abe4108248911fde5c88b126fa45fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <exceptions/cuda_exception.h> #include <rocblas.h> #include "../MmulHelper.h" #include <specials_cuda.h> namespace nd4j { ////////////////////////////////////////////////////////////////////////////// // MXK x KxN = MxN // C array must be in f order template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) { T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA)); T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB)); T3* C = reinterpret_cast<T3*>(vC); __shared__ T3 alphaZ, betaZ; __shared__ Nd4jLong strideArow, strideAcol, strideBrow, strideBcol; const int row = blockIdx.y * blockDim.y + threadIdx.y; const int col = blockIdx.x * blockDim.x + threadIdx.x; if(row == 0 && col == 0) { alphaZ = alpha; betaZ = beta; if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; } if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; } } __syncthreads(); T3 val = 0; if (row < M && col < N) for (int i = 0; i < K; i++) val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol]; C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc]; } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) { hipLaunchKernelGGL(( usualCudaGemm<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, transA, transB, M, N, K, alpha, vA, lda, vB, ldb, beta, vC, ldc); } ////////////////////////////////////////////////////////////////////////////// // MXN x N = M template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) { T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA)); T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX)); T3* Y = reinterpret_cast<T3*>(vY); __shared__ T3 alphaZ, betaZ; __shared__ Nd4jLong strideArow, strideAcol; const int row = blockIdx.x * blockDim.x + threadIdx.x; if(row == 0) { alphaZ = 
alpha; betaZ = beta; if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; } } __syncthreads(); T3 val = 0; if (row < M) for (int i = 0; i < N; i++) val = val + A[row * strideArow + i * strideAcol] * X[i * incx]; Y[row * incy] = alphaZ * val + betaZ * Y[row * incy]; } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemv(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) { hipLaunchKernelGGL(( usualCudaGemv<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, transA, M, N, alpha, vA, lda, vX, incx, beta, vY, incy); } ////////////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> static __global__ void usualCudaDot(const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) { T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX)); T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY)); T3* Z = reinterpret_cast<T3*>(vZ); extern __shared__ char shmem[]; auto pairwiseMul = reinterpret_cast<T3*>(shmem); const int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < length) pairwiseMul[tid] = X[tid * incx] * Y[tid * incy]; __syncthreads(); if(tid == 0) { T3 sum = 0; for(Nd4jLong i = 0; i < length; ++i) sum = sum + pairwiseMul[i]; *Z = (T3)alpha * sum + (T3)beta * *Z; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualDot(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) { hipLaunchKernelGGL(( usualCudaDot<T1,T2,T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), length*sizeof(T3) + 128, *stream, length, alpha, vX, incx, vY, incy, beta, vZ); } ////////////////////////////////////////////////////////////////////////////// // MXK x KxN = MxN NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta, const char outOrder) { if(A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !"); if(B->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !"); auto M = A->sizeAt(0); auto K = A->sizeAt(1); auto N = B->sizeAt(1); if(C != nullptr && C->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !"); if(B->sizeAt(0) != K) throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !"); if(C != nullptr && C->sizeAt(0) != M) throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !"); if(C != nullptr && C->sizeAt(1) != N) throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !"); if(C == nullptr) C = new NDArray(outOrder, {M,N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext()); NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C)); std::vector<NDArray*> toDelete; if(A->ews() != 1) { pA = pA->dup('f'); 
toDelete.push_back(pA); } if(B->ews() != 1) { pB = pB->dup('f'); toDelete.push_back(pB); } if(C->ews() != 1) { pC = pC->dup('f'); toDelete.push_back(pC); } if(pC->ordering() != 'f') { auto temp = pA; pA = new NDArray(pB ->permute({1,0})); pB = new NDArray(temp->permute({1,0})); pC = new NDArray(pC ->permute({1,0})); toDelete.push_back(pA); toDelete.push_back(pB); toDelete.push_back(pC); M = pA->sizeAt(0); K = pA->sizeAt(1); N = pB->sizeAt(1); } const auto aOrder = pA->ordering(); const auto bOrder = pB->ordering(); const bool transA = aOrder != 'f'; const bool transB = bOrder != 'f'; const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N; const hipblasOperation_t transBblas = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N; const int lda = aOrder == 'f' ? M : K; const int ldb = bOrder == 'f' ? K : N; const int ldc = M; // cOrder == 'f' ? M : N; const auto aType = pA->dataType(); const auto bType = pB->dataType(); const auto cType = pC->dataType(); auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = hipblasSetStream(*handle, *stream); if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status); const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC); NDArray::prepareSpecialUse({pC}, {pA, pB}); // choose appropriate cuda gemm api depending on data types if(ABC && aType == DataType::DOUBLE) { status = hipblasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)pB->getSpecialBuffer(), ldb, &beta, (double*)pC->getSpecialBuffer(), ldc); } else if(ABC && aType == DataType::FLOAT32) { float alphaF(alpha), betaF(beta); status = hipblasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)pB->getSpecialBuffer(), ldb, &betaF, (float*)pC->getSpecialBuffer(), ldc); } else if(ABC && aType == DataType::HALF) { printf("!!!!!!!!\n"); float16 alphaH(alpha), betaH(beta); status = hipblasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->getSpecialBuffer(), lda, (__half*)pB->getSpecialBuffer(), ldb, &betaH.data, (__half*)pC->getSpecialBuffer(), ldc); } else if(AB && aType == DataType::INT8 && cType == DataType::FLOAT32) { float alphaF(alpha), betaF(beta); status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), HIP_R_8I, lda, pB->getSpecialBuffer(), HIP_R_8I, ldb, &betaF, pC->getSpecialBuffer(), HIP_R_32F, ldc); } else if(AB && aType == DataType::HALF && cType == DataType::FLOAT32) { float alphaF(alpha), betaF(beta); status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), HIP_R_16F, lda, pB->getSpecialBuffer(), HIP_R_16F, ldb, &betaF, pC->getSpecialBuffer(), HIP_R_32F, ldc); } else { dim3 threadsPerBlock(N, M); dim3 blocksPerGrid(1, 1); if (M*N > 512){ threadsPerBlock.x = threadsPerBlock.y = 512; blocksPerGrid.x = math::nd4j_ceil<double, int>(static_cast<double>(N) / threadsPerBlock.x); // cols blocksPerGrid.y = math::nd4j_ceil<double, int>(static_cast<double>(M) / threadsPerBlock.y); // rows } //BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, stream, transA, transB, M, N, K, alpha, pA->getSpecialBuffer(), lda, pB->getSpecialBuffer(), ldb, beta, pC->getSpecialBuffer(), ldc), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (blocksPerGrid, threadsPerBlock, stream, 
transA, transB, M, N, K, alpha, pA->getSpecialBuffer(), lda, pB->getSpecialBuffer(), ldb, beta, pC->getSpecialBuffer(), ldc), NUMERIC_TYPES) } if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status); auto cudaResult = hipStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult); NDArray::registerSpecialUse({pC}, {pA, pB}); if(C->ews() != 1) C->assign(pC); for(int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; return C; } //////////////////////////////////////////////////////////////////////////// // MXN x N = M NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, nd4j::NDArray* Y, const double alpha, const double beta, const char outOrder) { int xLenDim, yLenDim(0); if(A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !"); if(!shape::isCommonVector(X->getShapeInfo(), xLenDim)) throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !"); const auto M = A->sizeAt(0); const auto N = A->sizeAt(1); if(Y != nullptr && !shape::isCommonVector(Y->getShapeInfo(), yLenDim)) throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !"); if(X->lengthOf() != N) throw std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !"); if(Y != nullptr && Y->lengthOf() != M) throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !"); if(Y == nullptr) Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()), A->getContext()); NDArray *pA(const_cast<NDArray*>(A)); if(A->ews() != 1) pA = pA->dup('f'); const bool transA = pA->ordering() == 'c'; const hipblasOperation_t transAblas = transA ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; int lda, lta; if(transA) { lda = N; lta = M; } else { lda = M; lta = N; } const int incx = X->stridesOf()[xLenDim]; const int incy = Y->stridesOf()[yLenDim]; const auto aType = pA->dataType(); const auto xType = X->dataType(); const auto yType = Y->dataType(); auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = hipblasSetStream(*handle, *stream); if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status); const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY); NDArray::prepareSpecialUse({Y}, {pA, X}); // choose appropriate cuda gemm api depending on data types if(AXY && aType == DataType::DOUBLE) { status = hipblasDgemv(*handle, transAblas, lda, lta, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)X->getSpecialBuffer(), incx, &beta, (double*)Y->getSpecialBuffer(), incy); } else if(AXY && aType == DataType::FLOAT32) { float alphaF(alpha), betaF(beta); status = hipblasSgemv(*handle, transAblas, lda, lta, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)X->getSpecialBuffer(), incx, &betaF, (float*)Y->getSpecialBuffer(), incy); } else { dim3 threadsPerBlock(M); dim3 blocksPerGrid(1); if (M > 512){ threadsPerBlock.x = 512; blocksPerGrid.x = math::nd4j_ceil<double, int>(static_cast<double>(M) / threadsPerBlock.x); // rows } //BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, transA, M, N, alpha, pA->getSpecialBuffer(), lda, X->getSpecialBuffer(), incx, beta, Y->getSpecialBuffer(), incy), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(xType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, transA, M, N, alpha, pA->getSpecialBuffer(), lda, X->getSpecialBuffer(), incx, beta, Y->getSpecialBuffer(), incy), NUMERIC_TYPES) } if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status); auto cudaResult = hipStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult); NDArray::registerSpecialUse({Y}, {pA, X}); if(pA != A) delete pA; return Y; } //////////////////////////////////////////////////////////////////////////// // (X * Y) = Z[0] NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, nd4j::NDArray* Z, const double alpha, const double beta) { int xLenDim(0), yLenDim(0); if(!shape::isCommonVector(X->getShapeInfo(), xLenDim)) throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !"); if(!shape::isCommonVector(Y->getShapeInfo(), yLenDim)) throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !"); if(Z != nullptr && !Z->isScalar()) throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !"); const auto length = X->lengthOf(); if(Y->lengthOf() != length) throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !"); if(Z == nullptr) Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext()); const Nd4jLong incx = X->stridesOf()[xLenDim]; const Nd4jLong incy = Y->stridesOf()[yLenDim]; const auto xType = X->dataType(); const auto yType = Y->dataType(); const auto zType = Z->dataType(); if(!X->isActualOnDeviceSide()) X->syncToDevice(); if(!Y->isActualOnDeviceSide()) Y->syncToDevice(); if(!Z->isActualOnDeviceSide()) Z->syncToDevice(); hipStream_t* stream = X->getContext()->getCudaStream(); dim3 
threadsPerBlock(512); dim3 blocksPerGrid(1); if (length > 512) threadsPerBlock.x = math::nd4j_ceil<double, int>(static_cast<double>(length) / 512); NDArray::prepareSpecialUse({Z}, {X, Y}); //BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), NUMERIC_TYPES) auto cudaResult = hipStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult); NDArray::registerSpecialUse({Z}, {X, Y}); return Z; } //BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); //BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); //BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); }
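// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: usualCudaDot above
// stores the pairwise products in shared memory and then lets thread 0 sum
// them serially. A common alternative is a shared-memory tree reduction; the
// single-block sketch below assumes blockDim.x is a power of two and
// length <= blockDim.x, and the kernel name is hypothetical. The same source
// is valid as CUDA or HIP device code.
template <typename T>
__global__ void dotReduceSketch(const int length, const T* x, const T* y, T* out) {
    extern __shared__ unsigned char smemRaw[];
    T* smem = reinterpret_cast<T*>(smemRaw);

    const int tid = threadIdx.x;
    smem[tid] = (tid < length) ? x[tid] * y[tid] : T(0);
    __syncthreads();

    // halve the number of active threads each step: O(log blockDim.x) steps
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride)
            smem[tid] += smem[tid + stride];
        __syncthreads();
    }
    if (tid == 0)
        *out = smem[0];
}
// Launch example: dotReduceSketch<float><<<1, 512, 512 * sizeof(float), stream>>>(n, x, y, z);
// ---------------------------------------------------------------------------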
f8abc140c7abe4108248911fde5c88b126fa45fb.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <exceptions/cuda_exception.h> #include <cublas_v2.h> #include "../MmulHelper.h" #include <specials_cuda.h> namespace nd4j { ////////////////////////////////////////////////////////////////////////////// // MXK x KxN = MxN // C array must be in f order template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) { T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA)); T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB)); T3* C = reinterpret_cast<T3*>(vC); __shared__ T3 alphaZ, betaZ; __shared__ Nd4jLong strideArow, strideAcol, strideBrow, strideBcol; const int row = blockIdx.y * blockDim.y + threadIdx.y; const int col = blockIdx.x * blockDim.x + threadIdx.x; if(row == 0 && col == 0) { alphaZ = alpha; betaZ = beta; if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; } if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; } } __syncthreads(); T3 val = 0; if (row < M && col < N) for (int i = 0; i < K; i++) val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol]; C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc]; } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc) { usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, transB, M, N, K, alpha, vA, lda, vB, ldb, beta, vC, ldc); } ////////////////////////////////////////////////////////////////////////////// // MXN x N = M template <typename T1, typename T2, typename T3> static __global__ void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) { T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA)); T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX)); T3* Y = reinterpret_cast<T3*>(vY); __shared__ T3 alphaZ, betaZ; __shared__ Nd4jLong strideArow, strideAcol; const int row = blockIdx.x * blockDim.x + threadIdx.x; if(row == 0) { alphaZ = alpha; betaZ = beta; if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; } } 
__syncthreads(); T3 val = 0; if (row < M) for (int i = 0; i < N; i++) val = val + A[row * strideArow + i * strideAcol] * X[i * incx]; Y[row * incy] = alphaZ * val + betaZ * Y[row * incy]; } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualGemv(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) { usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, M, N, alpha, vA, lda, vX, incx, beta, vY, incy); } ////////////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> static __global__ void usualCudaDot(const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) { T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX)); T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY)); T3* Z = reinterpret_cast<T3*>(vZ); extern __shared__ char shmem[]; auto pairwiseMul = reinterpret_cast<T3*>(shmem); const int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < length) pairwiseMul[tid] = X[tid * incx] * Y[tid * incy]; __syncthreads(); if(tid == 0) { T3 sum = 0; for(Nd4jLong i = 0; i < length; ++i) sum = sum + pairwiseMul[i]; *Z = (T3)alpha * sum + (T3)beta * *Z; } } //////////////////////////////////////////////////////////////////////// template <typename T1, typename T2, typename T3> __host__ static void usualDot(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ) { usualCudaDot<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, length*sizeof(T3) + 128, *stream>>>(length, alpha, vX, incx, vY, incy, beta, vZ); } ////////////////////////////////////////////////////////////////////////////// // MXK x KxN = MxN NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta, const char outOrder) { if(A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !"); if(B->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !"); auto M = A->sizeAt(0); auto K = A->sizeAt(1); auto N = B->sizeAt(1); if(C != nullptr && C->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !"); if(B->sizeAt(0) != K) throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !"); if(C != nullptr && C->sizeAt(0) != M) throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !"); if(C != nullptr && C->sizeAt(1) != N) throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !"); if(C == nullptr) C = new NDArray(outOrder, {M,N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()), A->getContext()); NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C)); std::vector<NDArray*> toDelete; if(A->ews() != 1) { pA = pA->dup('f'); toDelete.push_back(pA); } if(B->ews() != 1) { pB = pB->dup('f'); toDelete.push_back(pB); } if(C->ews() != 1) { pC = pC->dup('f'); toDelete.push_back(pC); } if(pC->ordering() != 'f') 
{ auto temp = pA; pA = new NDArray(pB ->permute({1,0})); pB = new NDArray(temp->permute({1,0})); pC = new NDArray(pC ->permute({1,0})); toDelete.push_back(pA); toDelete.push_back(pB); toDelete.push_back(pC); M = pA->sizeAt(0); K = pA->sizeAt(1); N = pB->sizeAt(1); } const auto aOrder = pA->ordering(); const auto bOrder = pB->ordering(); const bool transA = aOrder != 'f'; const bool transB = bOrder != 'f'; const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N; const cublasOperation_t transBblas = transB ? CUBLAS_OP_T : CUBLAS_OP_N; const int lda = aOrder == 'f' ? M : K; const int ldb = bOrder == 'f' ? K : N; const int ldc = M; // cOrder == 'f' ? M : N; const auto aType = pA->dataType(); const auto bType = pB->dataType(); const auto cType = pC->dataType(); auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = cublasSetStream_v2(*handle, *stream); if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status); const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC); NDArray::prepareSpecialUse({pC}, {pA, pB}); // choose appropriate cuda gemm api depending on data types if(ABC && aType == DataType::DOUBLE) { status = cublasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)pB->getSpecialBuffer(), ldb, &beta, (double*)pC->getSpecialBuffer(), ldc); } else if(ABC && aType == DataType::FLOAT32) { float alphaF(alpha), betaF(beta); status = cublasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)pB->getSpecialBuffer(), ldb, &betaF, (float*)pC->getSpecialBuffer(), ldc); } else if(ABC && aType == DataType::HALF) { printf("!!!!!!!!\n"); float16 alphaH(alpha), betaH(beta); status = cublasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->getSpecialBuffer(), lda, (__half*)pB->getSpecialBuffer(), ldb, &betaH.data, (__half*)pC->getSpecialBuffer(), ldc); } else if(AB && aType == DataType::INT8 && cType == DataType::FLOAT32) { float alphaF(alpha), betaF(beta); status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), CUDA_R_8I, lda, pB->getSpecialBuffer(), CUDA_R_8I, ldb, &betaF, pC->getSpecialBuffer(), CUDA_R_32F, ldc); } else if(AB && aType == DataType::HALF && cType == DataType::FLOAT32) { float alphaF(alpha), betaF(beta); status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->getSpecialBuffer(), CUDA_R_16F, lda, pB->getSpecialBuffer(), CUDA_R_16F, ldb, &betaF, pC->getSpecialBuffer(), CUDA_R_32F, ldc); } else { dim3 threadsPerBlock(N, M); dim3 blocksPerGrid(1, 1); if (M*N > 512){ threadsPerBlock.x = threadsPerBlock.y = 512; blocksPerGrid.x = math::nd4j_ceil<double, int>(static_cast<double>(N) / threadsPerBlock.x); // cols blocksPerGrid.y = math::nd4j_ceil<double, int>(static_cast<double>(M) / threadsPerBlock.y); // rows } //BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, stream, transA, transB, M, N, K, alpha, pA->getSpecialBuffer(), lda, pB->getSpecialBuffer(), ldb, beta, pC->getSpecialBuffer(), ldc), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm, (blocksPerGrid, threadsPerBlock, stream, transA, transB, M, N, K, alpha, pA->getSpecialBuffer(), lda, pB->getSpecialBuffer(), ldb, beta, pC->getSpecialBuffer(), ldc), NUMERIC_TYPES) } if (status != CUBLAS_STATUS_SUCCESS) throw 
cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status); auto cudaResult = cudaStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult); NDArray::registerSpecialUse({pC}, {pA, pB}); if(C->ews() != 1) C->assign(pC); for(int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; return C; } //////////////////////////////////////////////////////////////////////////// // MXN x N = M NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, nd4j::NDArray* Y, const double alpha, const double beta, const char outOrder) { int xLenDim, yLenDim(0); if(A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !"); if(!shape::isCommonVector(X->getShapeInfo(), xLenDim)) throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !"); const auto M = A->sizeAt(0); const auto N = A->sizeAt(1); if(Y != nullptr && !shape::isCommonVector(Y->getShapeInfo(), yLenDim)) throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !"); if(X->lengthOf() != N) throw std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !"); if(Y != nullptr && Y->lengthOf() != M) throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !"); if(Y == nullptr) Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()), A->getContext()); NDArray *pA(const_cast<NDArray*>(A)); if(A->ews() != 1) pA = pA->dup('f'); const bool transA = pA->ordering() == 'c'; const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N; int lda, lta; if(transA) { lda = N; lta = M; } else { lda = M; lta = N; } const int incx = X->stridesOf()[xLenDim]; const int incy = Y->stridesOf()[yLenDim]; const auto aType = pA->dataType(); const auto xType = X->dataType(); const auto yType = Y->dataType(); auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle()); auto stream = A->getContext()->getCudaStream(); auto status = cublasSetStream_v2(*handle, *stream); if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status); const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY); NDArray::prepareSpecialUse({Y}, {pA, X}); // choose appropriate cuda gemm api depending on data types if(AXY && aType == DataType::DOUBLE) { status = cublasDgemv(*handle, transAblas, lda, lta, &alpha, (double*)pA->getSpecialBuffer(), lda, (double*)X->getSpecialBuffer(), incx, &beta, (double*)Y->getSpecialBuffer(), incy); } else if(AXY && aType == DataType::FLOAT32) { float alphaF(alpha), betaF(beta); status = cublasSgemv(*handle, transAblas, lda, lta, &alphaF, (float*)pA->getSpecialBuffer(), lda, (float*)X->getSpecialBuffer(), incx, &betaF, (float*)Y->getSpecialBuffer(), incy); } else { dim3 threadsPerBlock(M); dim3 blocksPerGrid(1); if (M > 512){ threadsPerBlock.x = 512; blocksPerGrid.x = math::nd4j_ceil<double, int>(static_cast<double>(M) / threadsPerBlock.x); // rows } //BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, transA, M, N, alpha, pA->getSpecialBuffer(), lda, X->getSpecialBuffer(), incx, beta, Y->getSpecialBuffer(), incy), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(xType, usualGemv, (blocksPerGrid, threadsPerBlock, stream, transA, M, N, alpha, pA->getSpecialBuffer(), lda, X->getSpecialBuffer(), incx, beta, Y->getSpecialBuffer(), incy), NUMERIC_TYPES) } if (status != 
CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status); auto cudaResult = cudaStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult); NDArray::registerSpecialUse({Y}, {pA, X}); if(pA != A) delete pA; return Y; } //////////////////////////////////////////////////////////////////////////// // (X * Y) = Z[0] NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, nd4j::NDArray* Z, const double alpha, const double beta) { int xLenDim(0), yLenDim(0); if(!shape::isCommonVector(X->getShapeInfo(), xLenDim)) throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !"); if(!shape::isCommonVector(Y->getShapeInfo(), yLenDim)) throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !"); if(Z != nullptr && !Z->isScalar()) throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !"); const auto length = X->lengthOf(); if(Y->lengthOf() != length) throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !"); if(Z == nullptr) Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext()); const Nd4jLong incx = X->stridesOf()[xLenDim]; const Nd4jLong incy = Y->stridesOf()[yLenDim]; const auto xType = X->dataType(); const auto yType = Y->dataType(); const auto zType = Z->dataType(); if(!X->isActualOnDeviceSide()) X->syncToDevice(); if(!Y->isActualOnDeviceSide()) Y->syncToDevice(); if(!Z->isActualOnDeviceSide()) Z->syncToDevice(); cudaStream_t* stream = X->getContext()->getCudaStream(); dim3 threadsPerBlock(512); dim3 blocksPerGrid(1); if (length > 512) threadsPerBlock.x = math::nd4j_ceil<double, int>(static_cast<double>(length) / 512); NDArray::prepareSpecialUse({Z}, {X, Y}); //BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha, X->getSpecialBuffer(), incx, Y->getSpecialBuffer(), incy, beta, Z->getSpecialBuffer()), NUMERIC_TYPES) auto cudaResult = cudaStreamSynchronize(*stream); if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult); NDArray::registerSpecialUse({Z}, {X, Y}); return Z; } //BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); //BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); //BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const Nd4jLong length, const double alpha, const void* vX, const Nd4jLong incx, const void* vY, const Nd4jLong incy, const double beta, void* vZ), NUMERIC_TYPES, NUMERIC_TYPES, FLOAT_TYPES); }
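// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: mmulMxM above deals
// with cuBLAS's column-major convention by duplicating/permuting A, B and C
// until C is 'f'-ordered. The underlying identity can also be used directly:
// a row-major C(MxN) = A(MxK) * B(KxN) is the column-major product
// C^T = B^T * A^T, so the operands are passed to cublasSgemm in swapped
// order. The helper name below (rowMajorSgemm) is hypothetical; a valid
// cublasHandle_t and contiguous row-major buffers are assumed.
#include <cublas_v2.h>

static cublasStatus_t rowMajorSgemm(cublasHandle_t handle,
                                    int M, int N, int K,
                                    float alpha, const float* A, const float* B,
                                    float beta, float* C) {
    // Reinterpreted as column-major, the row-major buffers are A^T (KxM, ld K),
    // B^T (NxK, ld N) and C^T (NxM, ld N).
    return cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                       N, M, K,        // C^T is N x M, inner dimension K
                       &alpha,
                       B, N,           // first operand:  B^T
                       A, K,           // second operand: A^T
                       &beta,
                       C, N);
}
// ---------------------------------------------------------------------------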
89afbefaba46a055fd3f756315f16e5ffca6775f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once

#include <stdio.h>
#include "transpose_kernel.cu"
#include "summed_area_table.h"

__global__ void compute_grandient_kernel(float* Integral_image, float* Grandient_image, int w, int h, int kernel_size)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int index = x * w + y;

    if ( ( y > ( kernel_size / 2 + 1 ) ) && ( y < ( w - kernel_size / 2 ) ) &&
         ( x > ( kernel_size / 2 ) )     && ( x < ( h - kernel_size / 2 ) ) )
    {
        float B1 = Integral_image[ ( (x - kernel_size / 2) )        * w + (y + kernel_size / 2) ];
        float A1 = Integral_image[ (x + kernel_size / 2 - 1)        * w + (y + kernel_size / 2) ];
        float D1 = Integral_image[ (x - kernel_size / 2 + 1 - 1)    * w + (y + 1 - 1) ];
        float C1 = Integral_image[ (x + kernel_size / 2 - 1)        * w + (y + 1 - 1) ];

        float B2 = Integral_image[ ( (x - kernel_size / 2) + 1 - 1) * w + (y - 1) ];
        float A2 = Integral_image[ (x + kernel_size / 2 - 1)        * w + (y - 1) ];
        float D2 = Integral_image[ (x - kernel_size / 2 + 1 - 1)    * w + (y - kernel_size / 2 - 1) ];
        float C2 = Integral_image[ (x + kernel_size / 2 - 1)        * w + (y - kernel_size / 2 - 1) ];

        float sum1 = A1 + D1 - B1 - C1;
        float sum2 = A2 + D2 - B2 - C2;

        Grandient_image[index] = (sum1 - sum2) / (kernel_size * kernel_size);
    }
    else
    {
        Grandient_image[index] = 0;
    }
}

void callComputeGradient(float* Integral_image, float* Grandient_image, int widthImage, int heightImage,
                         int kernel_size, int threadsX, int threadsY, hipStream_t stream)
{
    dim3 block(threadsX, threadsY, 1);
    dim3 grid(heightImage / block.x, widthImage / block.y, 1);

    hipLaunchKernelGGL(( compute_grandient_kernel), dim3(grid), dim3(block), 0, stream,
                       Integral_image, Grandient_image, widthImage, heightImage, kernel_size);
    //hipDeviceSynchronize();
}

void compute_gx(float* integral_in, float* grandient_out, unsigned int h, unsigned int w,
                int kernel_size, int threadsX, int threadsY, hipStream_t stream)
{
    callComputeGradient(integral_in, grandient_out, w, h, kernel_size, threadsX, threadsY, stream);
}

// Last two arguments are temporaries
void compute_gy(float* integral_in, float* grandient_out, unsigned int h, unsigned int w,
                int kernel_size, int threadsX, int threadsY, float* temp1, float* temp2, hipStream_t stream)
{
    thrust::device_ptr<float> integral_in_thrust( integral_in );
    thrust::device_ptr<float> grandient_out_thrust( grandient_out );
    thrust::device_ptr<float> temp1_thrust( temp1 );
    thrust::device_ptr<float> temp2_thrust( temp2 );

    transpose(h, w, integral_in_thrust, temp1_thrust);

    int wT = h;
    int hT = w;

    // compute gradient
    callComputeGradient(temp1, temp2, wT, hT, kernel_size, threadsX, threadsY, stream);

    // transpose gradient
    transpose(hT, wT, temp2_thrust, grandient_out_thrust);
}
89afbefaba46a055fd3f756315f16e5ffca6775f.cu
#pragma once

#include <stdio.h>
#include "transpose_kernel.cu"
#include "summed_area_table.h"

__global__ void compute_grandient_kernel(float* Integral_image, float* Grandient_image, int w, int h, int kernel_size)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int index = x * w + y;

    if ( ( y > ( kernel_size / 2 + 1 ) ) && ( y < ( w - kernel_size / 2 ) ) &&
         ( x > ( kernel_size / 2 ) )     && ( x < ( h - kernel_size / 2 ) ) )
    {
        float B1 = Integral_image[ ( (x - kernel_size / 2) )        * w + (y + kernel_size / 2) ];
        float A1 = Integral_image[ (x + kernel_size / 2 - 1)        * w + (y + kernel_size / 2) ];
        float D1 = Integral_image[ (x - kernel_size / 2 + 1 - 1)    * w + (y + 1 - 1) ];
        float C1 = Integral_image[ (x + kernel_size / 2 - 1)        * w + (y + 1 - 1) ];

        float B2 = Integral_image[ ( (x - kernel_size / 2) + 1 - 1) * w + (y - 1) ];
        float A2 = Integral_image[ (x + kernel_size / 2 - 1)        * w + (y - 1) ];
        float D2 = Integral_image[ (x - kernel_size / 2 + 1 - 1)    * w + (y - kernel_size / 2 - 1) ];
        float C2 = Integral_image[ (x + kernel_size / 2 - 1)        * w + (y - kernel_size / 2 - 1) ];

        float sum1 = A1 + D1 - B1 - C1;
        float sum2 = A2 + D2 - B2 - C2;

        Grandient_image[index] = (sum1 - sum2) / (kernel_size * kernel_size);
    }
    else
    {
        Grandient_image[index] = 0;
    }
}

void callComputeGradient(float* Integral_image, float* Grandient_image, int widthImage, int heightImage,
                         int kernel_size, int threadsX, int threadsY, cudaStream_t stream)
{
    dim3 block(threadsX, threadsY, 1);
    dim3 grid(heightImage / block.x, widthImage / block.y, 1);

    compute_grandient_kernel<<<grid, block, 0, stream>>>( Integral_image, Grandient_image, widthImage, heightImage, kernel_size);
    //cudaThreadSynchronize();
}

void compute_gx(float* integral_in, float* grandient_out, unsigned int h, unsigned int w,
                int kernel_size, int threadsX, int threadsY, cudaStream_t stream)
{
    callComputeGradient(integral_in, grandient_out, w, h, kernel_size, threadsX, threadsY, stream);
}

// Last two arguments are temporaries
void compute_gy(float* integral_in, float* grandient_out, unsigned int h, unsigned int w,
                int kernel_size, int threadsX, int threadsY, float* temp1, float* temp2, cudaStream_t stream)
{
    thrust::device_ptr<float> integral_in_thrust( integral_in );
    thrust::device_ptr<float> grandient_out_thrust( grandient_out );
    thrust::device_ptr<float> temp1_thrust( temp1 );
    thrust::device_ptr<float> temp2_thrust( temp2 );

    transpose(h, w, integral_in_thrust, temp1_thrust);

    int wT = h;
    int hT = w;

    // compute gradient
    callComputeGradient(temp1, temp2, wT, hT, kernel_size, threadsX, threadsY, stream);

    // transpose gradient
    transpose(hT, wT, temp2_thrust, grandient_out_thrust);
}
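// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: compute_grandient_kernel
// above differences two box sums read from a summed-area table (integral
// image). The identity it relies on, written as a small host-side helper for
// clarity; boxSum and its inclusive-corner convention are assumptions for
// illustration and may differ from the convention used in summed_area_table.h.
//
//   sum of I over rows r0..r1, cols c0..c1
//     = S[r1][c1] - S[r0-1][c1] - S[r1][c0-1] + S[r0-1][c0-1]
static inline float boxSum(const float* S, int w, int r0, int c0, int r1, int c1)
{
    // treat out-of-range corners (r or c == -1) as zero
    auto at = [&](int r, int c) -> float {
        return (r < 0 || c < 0) ? 0.0f : S[r * w + c];
    };
    return at(r1, c1) - at(r0 - 1, c1) - at(r1, c0 - 1) + at(r0 - 1, c0 - 1);
}
// ---------------------------------------------------------------------------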
3829ce08265d54bdb6129390acd8e0fbfcf410f3.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>

#include <hip/hip_runtime.h>

#include "util.h"
#include "CudaStream.h"
#include "CudaEvent.h"

#define USE_PINNED

// CUDA kernel implementing axpy:
//      y += alpha*x
__global__
void axpy(int n, double alpha, const double *x, double* y) {
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid<n) {
        y[tid] += alpha*x[tid];
    }
}

int main(int argc, char** argv) {
    size_t pow = read_arg(argc, argv, 1, 20);
    int num_chunks = read_arg(argc, argv, 2, 1);
    size_t N = 1 << pow;
    auto size_in_bytes = N * sizeof(double);

    std::cout << "memcopy and daxpy test of length N = " << N
              << " : " << size_in_bytes/(1024.*1024.) << "MB"
              << std::endl;

    hipInit(0);

    double* xd = malloc_device<double>(N);
    double* yd = malloc_device<double>(N);

    double* xh = malloc_host_pinned<double>(N, 1.5);
    double* yh = malloc_host_pinned<double>(N, 3.0);
    double* y  = malloc_host_pinned<double>(N, 0.0);

    int chunk_size = N/num_chunks; // assume N % num_chunks == 0

    // precompute kernel launch configuration
    auto block_dim = 128ul;
    auto grid_dim = chunk_size/block_dim + (chunk_size%block_dim ? 1 : 0);

    CudaStream D2H_stream(true);
    CudaStream H2D_stream(true);
    CudaStream kernel_stream(true);

    auto start_event = D2H_stream.enqueue_event();
    for(int i=0; i<num_chunks; ++i) {
        auto offset = i*chunk_size;

        // copy chunk to device
        copy_to_device_async<double>(xh+offset, xd+offset, chunk_size, H2D_stream.stream());
        copy_to_device_async<double>(yh+offset, yd+offset, chunk_size, H2D_stream.stream());

        // force the kernel stream to wait for the memcpy
        auto H2D_event = H2D_stream.enqueue_event();
        kernel_stream.wait_on_event(H2D_event);

        // y += 2 * x
        hipLaunchKernelGGL(( axpy), dim3(grid_dim), dim3(block_dim), 0, kernel_stream.stream(),
                           chunk_size, 2.0, xd+offset, yd+offset);
        cuda_check_last_kernel("axpy kernel");

        // copy chunk of result back to host
        auto kernel_event = kernel_stream.enqueue_event();
        D2H_stream.wait_on_event(kernel_event);
        copy_to_host_async<double>(yd+offset, y+offset, chunk_size, D2H_stream.stream());
    }
    auto end_event = D2H_stream.enqueue_event();
    end_event.wait();

    auto time_total = end_event.time_since(start_event);

    std::cout << "-------\ntimings\n-------" << std::endl;
    std::cout << "total : " << time_total << std::endl;

    // check for errors
    auto errors = 0;
    for(auto i=0; i<N; ++i) {
        if(::fabs(6.-y[i])>1e-15) {
            errors++;
        }
    }

    if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
    else         std::cout << "\n============ PASSED" << std::endl;

    hipFree(xd);
    hipFree(yd);
    hipHostFree(xh);
    hipHostFree(yh);
    hipHostFree(y);

    return 0;
}
3829ce08265d54bdb6129390acd8e0fbfcf410f3.cu
#include <iostream>

#include <cuda.h>

#include "util.h"
#include "CudaStream.h"
#include "CudaEvent.h"

#define USE_PINNED

// CUDA kernel implementing axpy:
//      y += alpha*x
__global__
void axpy(int n, double alpha, const double *x, double* y) {
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if(tid<n) {
        y[tid] += alpha*x[tid];
    }
}

int main(int argc, char** argv) {
    size_t pow = read_arg(argc, argv, 1, 20);
    int num_chunks = read_arg(argc, argv, 2, 1);
    size_t N = 1 << pow;
    auto size_in_bytes = N * sizeof(double);

    std::cout << "memcopy and daxpy test of length N = " << N
              << " : " << size_in_bytes/(1024.*1024.) << "MB"
              << std::endl;

    cuInit(0);

    double* xd = malloc_device<double>(N);
    double* yd = malloc_device<double>(N);

    double* xh = malloc_host_pinned<double>(N, 1.5);
    double* yh = malloc_host_pinned<double>(N, 3.0);
    double* y  = malloc_host_pinned<double>(N, 0.0);

    int chunk_size = N/num_chunks; // assume N % num_chunks == 0

    // precompute kernel launch configuration
    auto block_dim = 128ul;
    auto grid_dim = chunk_size/block_dim + (chunk_size%block_dim ? 1 : 0);

    CudaStream D2H_stream(true);
    CudaStream H2D_stream(true);
    CudaStream kernel_stream(true);

    auto start_event = D2H_stream.enqueue_event();
    for(int i=0; i<num_chunks; ++i) {
        auto offset = i*chunk_size;

        // copy chunk to device
        copy_to_device_async<double>(xh+offset, xd+offset, chunk_size, H2D_stream.stream());
        copy_to_device_async<double>(yh+offset, yd+offset, chunk_size, H2D_stream.stream());

        // force the kernel stream to wait for the memcpy
        auto H2D_event = H2D_stream.enqueue_event();
        kernel_stream.wait_on_event(H2D_event);

        // y += 2 * x
        axpy<<<grid_dim, block_dim, 0, kernel_stream.stream()>>> (chunk_size, 2.0, xd+offset, yd+offset);
        cuda_check_last_kernel("axpy kernel");

        // copy chunk of result back to host
        auto kernel_event = kernel_stream.enqueue_event();
        D2H_stream.wait_on_event(kernel_event);
        copy_to_host_async<double>(yd+offset, y+offset, chunk_size, D2H_stream.stream());
    }
    auto end_event = D2H_stream.enqueue_event();
    end_event.wait();

    auto time_total = end_event.time_since(start_event);

    std::cout << "-------\ntimings\n-------" << std::endl;
    std::cout << "total : " << time_total << std::endl;

    // check for errors
    auto errors = 0;
    for(auto i=0; i<N; ++i) {
        if(std::fabs(6.-y[i])>1e-15) {
            errors++;
        }
    }

    if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors" << std::endl;
    else         std::cout << "\n============ PASSED" << std::endl;

    cudaFree(xd);
    cudaFree(yd);
    cudaFreeHost(xh);
    cudaFreeHost(yh);
    cudaFreeHost(y);

    return 0;
}
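// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: the chunked loop above
// expresses its H2D -> kernel -> D2H ordering through the CudaStream/CudaEvent
// wrapper classes. The same dependency chain for a single chunk, using only
// the raw CUDA runtime API; the function name and parameters are hypothetical
// and error checking is omitted for brevity.
#include <cuda_runtime.h>

static void axpy_chunk_raw_streams(int n, double alpha,
                                   const double* xh, const double* yh, double* yout,
                                   double* xd, double* yd,
                                   dim3 grid_dim, dim3 block_dim) {
    const size_t bytes = n * sizeof(double);

    cudaStream_t h2d, compute, d2h;
    cudaStreamCreate(&h2d); cudaStreamCreate(&compute); cudaStreamCreate(&d2h);
    cudaEvent_t h2d_done, kernel_done;
    cudaEventCreate(&h2d_done); cudaEventCreate(&kernel_done);

    cudaMemcpyAsync(xd, xh, bytes, cudaMemcpyHostToDevice, h2d);
    cudaMemcpyAsync(yd, yh, bytes, cudaMemcpyHostToDevice, h2d);
    cudaEventRecord(h2d_done, h2d);

    cudaStreamWaitEvent(compute, h2d_done, 0);   // kernel waits for the copies
    axpy<<<grid_dim, block_dim, 0, compute>>>(n, alpha, xd, yd);
    cudaEventRecord(kernel_done, compute);

    cudaStreamWaitEvent(d2h, kernel_done, 0);    // copy-back waits for the kernel
    cudaMemcpyAsync(yout, yd, bytes, cudaMemcpyDeviceToHost, d2h);
    cudaStreamSynchronize(d2h);

    cudaEventDestroy(h2d_done); cudaEventDestroy(kernel_done);
    cudaStreamDestroy(h2d); cudaStreamDestroy(compute); cudaStreamDestroy(d2h);
}
// ---------------------------------------------------------------------------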
080c67681ef3afde5717597ce6ce13f44fcfba5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Python.h> #include <iostream> #include "theano_mod_helper.h" #include "cuda_ndarray.cuh" ////////////////////// //// Support Code ////////////////////// #define INTDIV_POW2(a, b) (a >> b) #define INTMOD_POW2(a, b) (a & ((1<<b)-1)) // GpuElemwise{Composite{((i0 * i1) + (i2 * i3))}}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True,)) // Input 1 CudaNdarrayType(float32, vector) // Input 2 CudaNdarrayType(float32, (True,)) // Input 3 CudaNdarrayType(float32, vector) // Output 0 CudaNdarrayType(float32, vector) static __global__ void kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_1(unsigned int numEls , const int dim0 , const float * i0_data, int i0_str_0 , const float * i1_data, int i1_str_0 , const float * i2_data, int i2_str_0 , const float * i3_data, int i3_str_0 , float * o0_data, int o0_str_0 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; const float ii_i2_value = i2_data[0]; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i1_data = i1_data; const float * ii_i3_data = i3_data; float * ii_o0_data = o0_data; int pos0 = ii; ii_i1_data += pos0 * i1_str_0; ii_i3_data += pos0 * i3_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = ii_i2_value * ii_i3_data[0]; npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i0_value * ii_i1_data[0]; o0_i = V_DUMMY_ID__tmp2 + V_DUMMY_ID__tmp1; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 * i1) + (i2 * i3))}}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True,)) // Input 1 CudaNdarrayType(float32, vector) // Input 2 CudaNdarrayType(float32, (True,)) // Input 3 CudaNdarrayType(float32, vector) // Output 0 CudaNdarrayType(float32, vector) static __global__ void kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_Ccontiguous (unsigned int numEls , const float * i0_data , const float * i1_data , const float * i2_data , const float * i3_data , float * o0_data ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; const float ii_i2_value = i2_data[0]; for (int i = idx; i < numEls; i += numThreads) { npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = ii_i2_value * i3_data[i]; npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i0_value * i1_data[i]; o0_i = V_DUMMY_ID__tmp2 + V_DUMMY_ID__tmp1; } o0_data[i] = o0_i; } } static void can_collapse_node_7a7785233d95e11377dbbaa3365ca50c_0(int nd, const int * dims, const int * strides, int collapse[]) { //can we collapse dims[i] and dims[i-1] for(int i=nd-1;i>0;i--){ if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd collapse[i]=1; }else collapse[i]=0; } } static int callkernel_node_7a7785233d95e11377dbbaa3365ca50c_0(unsigned int numEls, const int d, const int * dims, const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, const float * i3_data, const int * i3_str, float * o0_data, const int * o0_str) { numEls = dims[0]*1; int local_dims[1]; int local_str[4][1]; int local_ostr[1][1]; int nd_collapse = 1; for(int i=0;i<1;i++){//init new dim local_dims[i]=dims[i]; } for(int i=0;i<1;i++){//init new strides local_str[0][i]=i0_str[i]; } for(int 
i=0;i<1;i++){//init new strides local_str[1][i]=i1_str[i]; } for(int i=0;i<1;i++){//init new strides local_str[2][i]=i2_str[i]; } for(int i=0;i<1;i++){//init new strides local_str[3][i]=i3_str[i]; } for(int i=0;i<1;i++){//init new strides local_ostr[0][i]=o0_str[i]; } for(int id=0;id<nd_collapse;id++){ bool all_broadcast=true; for(int input_id=0;input_id<4;input_id++){ if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } for(int input_id=0;input_id<1;input_id++){ if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } if(all_broadcast){ for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; for(int input_id=0;input_id<4;input_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_str[input_id][j-1]=local_str[input_id][j]; } } for(int output_id=0;output_id<1;output_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_ostr[output_id][j-1]=local_ostr[output_id][j]; } } nd_collapse--; id--; } } int nd_collapse_[1] = {1}; int nd_collapse_1[1] = {1}; can_collapse_node_7a7785233d95e11377dbbaa3365ca50c_0(nd_collapse, local_dims, local_str[1], nd_collapse_1); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_1[i]==0) nd_collapse_[i]=0; } int nd_collapse_3[1] = {1}; can_collapse_node_7a7785233d95e11377dbbaa3365ca50c_0(nd_collapse, local_dims, local_str[3], nd_collapse_3); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_3[i]==0) nd_collapse_[i]=0; } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[0][i-1]=local_str[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[0][j-1]=local_str[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[1][i-1]=local_str[1][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[1][j-1]=local_str[1][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[2][i-1]=local_str[2][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[2][j-1]=local_str[2][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[3][i-1]=local_str[3][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[3][j-1]=local_str[3][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_ostr[0][i-1]=local_ostr[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_ostr[0][j-1]=local_ostr[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_dims[i-1]*=local_dims[i];//set new dims for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; } } for(int i=1, end=nd_collapse;i<end;i++){ if(nd_collapse_[i]==1)nd_collapse--; } if(nd_collapse == 1 && local_str[1][nd_collapse-1]==1 && local_str[3][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1 ){nd_collapse=0;} if(numEls==0) return 0; switch (nd_collapse==0?0:min(1,nd_collapse)) { case 0: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( 
kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_Ccontiguous), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, i0_data, i1_data, i2_data, i3_data, o0_data); //std::cerr << "calling callkernel returned\n"; CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7a7785233d95e11377dbbaa3365ca50c_0 Composite", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, o0_data)"); return -1; } return 0; } break; case 1: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_1), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], o0_data, local_ostr[0][0]); CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7a7785233d95e11377dbbaa3365ca50c_0 Composite", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], o0_data, local_ostr[0][0])"); return -1; } return 0; } break; } return -2; } namespace { struct __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c { PyObject* __ERROR; PyObject* storage_V3; PyObject* storage_V5; PyObject* storage_V7; PyObject* storage_V9; PyObject* storage_V1; __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c() { // This is only somewhat safe because we: // 1) Are not a virtual class // 2) Do not use any virtual classes in the members // 3) Deal with mostly POD and pointers // If this changes, we would have to revise this, but for // now I am tired of chasing segfaults because // initialization code had an error and some pointer has // a junk value. 
memset(this, 0, sizeof(*this)); } ~__struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c(void) { cleanup(); } int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V1) { Py_XINCREF(storage_V3); Py_XINCREF(storage_V5); Py_XINCREF(storage_V7); Py_XINCREF(storage_V9); Py_XINCREF(storage_V1); this->storage_V3 = storage_V3; this->storage_V5 = storage_V5; this->storage_V7 = storage_V7; this->storage_V9 = storage_V9; this->storage_V1 = storage_V1; this->__ERROR = __ERROR; return 0; } void cleanup(void) { __label_1: double __DUMMY_1; __label_3: double __DUMMY_3; __label_5: double __DUMMY_5; __label_7: double __DUMMY_7; __label_9: double __DUMMY_9; __label_12: double __DUMMY_12; Py_XDECREF(this->storage_V3); Py_XDECREF(this->storage_V5); Py_XDECREF(this->storage_V7); Py_XDECREF(this->storage_V9); Py_XDECREF(this->storage_V1); } int run(void) { int __failure = 0; PyObject* py_V1; CudaNdarray * V1; PyObject* py_V3; CudaNdarray * V3; PyObject* py_V5; CudaNdarray * V5; PyObject* py_V7; CudaNdarray * V7; PyObject* py_V9; CudaNdarray * V9; { py_V1 = Py_None; {Py_XINCREF(py_V1);} V1 = NULL; { py_V3 = PyList_GET_ITEM(storage_V3, 0); {Py_XINCREF(py_V3);} assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V3)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); V3 = (CudaNdarray*)py_V3; //std::cerr << "c_extract " << V3 << '\n'; if (V3->nd != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 1", V3->nd); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V3)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V3)[0], 0); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n"; //std::cerr << "c_extract " << V3->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V3)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V3)[0], 0); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "bcast check 0 passed\n"; assert(V3); Py_INCREF(py_V3); } else if (py_V3 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_4;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract done " << V3 << '\n'; { py_V5 = PyList_GET_ITEM(storage_V5, 0); {Py_XINCREF(py_V5);} assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V5)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); V5 = (CudaNdarray*)py_V5; //std::cerr << "c_extract " << V5 << '\n'; if (V5->nd != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 1", V5->nd); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract " << V5 << " nd check passed\n"; assert(V5); Py_INCREF(py_V5); } else if (py_V5 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract done " << V5 << '\n'; { py_V7 = PyList_GET_ITEM(storage_V7, 0); {Py_XINCREF(py_V7);} assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V7)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); V7 = (CudaNdarray*)py_V7; //std::cerr << "c_extract " << V7 << '\n'; if (V7->nd != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 1", V7->nd); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V7)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V7)[0], 0); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V7 << "checking bcast 0 <" << V7->str<< ">\n"; //std::cerr << "c_extract " << V7->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V7)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V7)[0], 0); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << "bcast check 0 passed\n"; assert(V7); Py_INCREF(py_V7); } else if (py_V7 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract done " << V7 << '\n'; { py_V9 = PyList_GET_ITEM(storage_V9, 0); {Py_XINCREF(py_V9);} assert(py_V9->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V9)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); V9 = (CudaNdarray*)py_V9; //std::cerr << "c_extract " << V9 << '\n'; if (V9->nd != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 1", V9->nd); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << " nd check passed\n"; assert(V9); Py_INCREF(py_V9); } else if (py_V9 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract done " << V9 << '\n'; { // Op class GpuElemwise //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} START\n"; //standard elemwise size checks int dims[1] = {1}; int broadcasts_V3[1] = {1}; int broadcasts_V5[1] = {0}; int broadcasts_V7[1] = {1}; int broadcasts_V9[1] = {0}; //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V3\n"; if (1 != V3->nd) { PyErr_Format(PyExc_TypeError, "need 1 dims, not %i", V3->nd); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_11;}; } for (int i = 0; i< 1; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i]; if ((!(broadcasts_V3[i] && CudaNdarray_HOST_DIMS(V3)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V3)[i])) { //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V3 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 0 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V3)[i], dims[i] ); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V5\n"; if (1 != V5->nd) { PyErr_Format(PyExc_TypeError, "need 1 dims, not %i", V5->nd); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } for (int i = 0; i< 1; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i]; if ((!(broadcasts_V5[i] && CudaNdarray_HOST_DIMS(V5)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V5)[i])) { //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V5 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 1 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V5)[i], dims[i] ); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V7\n"; if (1 != V7->nd) { PyErr_Format(PyExc_TypeError, "need 1 dims, not %i", V7->nd); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } for (int i = 0; i< 1; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i]; if ((!(broadcasts_V7[i] && CudaNdarray_HOST_DIMS(V7)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V7)[i])) { //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V7 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 2 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V7)[i], dims[i] ); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V9\n"; if (1 != V9->nd) { PyErr_Format(PyExc_TypeError, "need 1 dims, not %i", V9->nd); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } for (int i = 0; i< 1; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V9)[i] : dims[i]; if ((!(broadcasts_V9[i] && CudaNdarray_HOST_DIMS(V9)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V9)[i])) { //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V9 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. 
Input" " 3 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V9)[i], dims[i] ); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } Py_XDECREF(V1); V1 = V5; Py_INCREF(V1); for (int i = 0; (i< 1) && (V1); ++i) { if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i]) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Output dimension mis-match. Output" " 0 (indices start at 0), working inplace" " on input 1, has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V1)[i], dims[i] ); Py_DECREF(V1); V1 = NULL; { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } //std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n"; //std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n"; { //new block so that failure gotos don't skip over variable initialization //std::cerr << "calling callkernel\n"; if (callkernel_node_7a7785233d95e11377dbbaa3365ca50c_0(1, 0, dims , CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3) , CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5) , CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7) , CudaNdarray_DEV_DATA(V9), CudaNdarray_HOST_STRIDES(V9) , CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1) )) { // error Py_DECREF(V1); V1 = NULL; { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } else // no error { } } //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} END\n"; __label_11: double __DUMMY_11; } __label_10: //std::cerr << "cleanup " << py_V9 << " " << V9 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); if (V9) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V9, (V9->ob_refcnt)); Py_XDECREF(V9); } //std::cerr << "cleanup done" << py_V9 << "\n"; {Py_XDECREF(py_V9);} double __DUMMY_10; } __label_8: //std::cerr << "cleanup " << py_V7 << " " << V7 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); if (V7) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt)); Py_XDECREF(V7); } //std::cerr << "cleanup done" << py_V7 << "\n"; {Py_XDECREF(py_V7);} double __DUMMY_8; } __label_6: //std::cerr << "cleanup " << py_V5 << " " << V5 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); if (V5) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt)); Py_XDECREF(V5); } //std::cerr << "cleanup done" << py_V5 << "\n"; {Py_XDECREF(py_V5);} double __DUMMY_6; } __label_4: //std::cerr << "cleanup " << py_V3 << " " << V3 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); if (V3) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt)); Py_XDECREF(V3); } //std::cerr << "cleanup done" << py_V3 << "\n"; {Py_XDECREF(py_V3);} double __DUMMY_4; } __label_2: if (!__failure) { //std::cerr << "sync\n"; if (NULL == V1) { // failure: sync None to storage Py_XDECREF(py_V1); py_V1 = Py_None; Py_INCREF(py_V1); } else { if (py_V1 != (PyObject*)V1) { Py_XDECREF(py_V1); py_V1 = (PyObject*)V1; Py_INCREF(py_V1); } assert(py_V1->ob_refcnt); } 
PyObject* old = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} PyList_SET_ITEM(storage_V1, 0, py_V1); {Py_XDECREF(old);} } //std::cerr << "cleanup " << py_V1 << " " << V1 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); if (V1) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt)); Py_XDECREF(V1); } //std::cerr << "cleanup done" << py_V1 << "\n"; {Py_XDECREF(py_V1);} double __DUMMY_2; } if (__failure) { // When there is a failure, this code puts the exception // in __ERROR. PyObject* err_type = NULL; PyObject* err_msg = NULL; PyObject* err_traceback = NULL; PyErr_Fetch(&err_type, &err_msg, &err_traceback); if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);} if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);} if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);} PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0); PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1); PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2); PyList_SET_ITEM(__ERROR, 0, err_type); PyList_SET_ITEM(__ERROR, 1, err_msg); PyList_SET_ITEM(__ERROR, 2, err_traceback); {Py_XDECREF(old_err_type);} {Py_XDECREF(old_err_msg);} {Py_XDECREF(old_err_traceback);} } // The failure code is returned to index what code block failed. return __failure; } }; } static int __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c_executor(__struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c* self) { return self->run(); } static void __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c_destructor(void* executor, void* self) { delete ((__struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c*)self); } ////////////////////// //// Functions ////////////////////// static PyObject * instantiate(PyObject * self, PyObject *argtuple) { assert(PyTuple_Check(argtuple)); if (6 != PyTuple_Size(argtuple)){ PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 6, got %i", (int)PyTuple_Size(argtuple)); return NULL; } __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c* struct_ptr = new __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c(); if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5) ) != 0) { delete struct_ptr; return NULL; } PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c_executor), struct_ptr, __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c_destructor); return thunk; } ////////////////////// //// Module init ////////////////////// static PyMethodDef MyMethods[] = { {"instantiate", instantiate, METH_VARARGS, "undocumented"} , {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init7a7785233d95e11377dbbaa3365ca50c(void){ (void) Py_InitModule("7a7785233d95e11377dbbaa3365ca50c", MyMethods); }
080c67681ef3afde5717597ce6ce13f44fcfba5f.cu
#include <Python.h> #include <iostream> #include "theano_mod_helper.h" #include "cuda_ndarray.cuh" ////////////////////// //// Support Code ////////////////////// #define INTDIV_POW2(a, b) (a >> b) #define INTMOD_POW2(a, b) (a & ((1<<b)-1)) // GpuElemwise{Composite{((i0 * i1) + (i2 * i3))}}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True,)) // Input 1 CudaNdarrayType(float32, vector) // Input 2 CudaNdarrayType(float32, (True,)) // Input 3 CudaNdarrayType(float32, vector) // Output 0 CudaNdarrayType(float32, vector) static __global__ void kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_1(unsigned int numEls , const int dim0 , const float * i0_data, int i0_str_0 , const float * i1_data, int i1_str_0 , const float * i2_data, int i2_str_0 , const float * i3_data, int i3_str_0 , float * o0_data, int o0_str_0 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; const float ii_i2_value = i2_data[0]; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i1_data = i1_data; const float * ii_i3_data = i3_data; float * ii_o0_data = o0_data; int pos0 = ii; ii_i1_data += pos0 * i1_str_0; ii_i3_data += pos0 * i3_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = ii_i2_value * ii_i3_data[0]; npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i0_value * ii_i1_data[0]; o0_i = V_DUMMY_ID__tmp2 + V_DUMMY_ID__tmp1; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 * i1) + (i2 * i3))}}[(0, 1)] // node.op.destroy_map={0: [1]} // Input 0 CudaNdarrayType(float32, (True,)) // Input 1 CudaNdarrayType(float32, vector) // Input 2 CudaNdarrayType(float32, (True,)) // Input 3 CudaNdarrayType(float32, vector) // Output 0 CudaNdarrayType(float32, vector) static __global__ void kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_Ccontiguous (unsigned int numEls , const float * i0_data , const float * i1_data , const float * i2_data , const float * i3_data , float * o0_data ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i0_value = i0_data[0]; const float ii_i2_value = i2_data[0]; for (int i = idx; i < numEls; i += numThreads) { npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = ii_i2_value * i3_data[i]; npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i0_value * i1_data[i]; o0_i = V_DUMMY_ID__tmp2 + V_DUMMY_ID__tmp1; } o0_data[i] = o0_i; } } static void can_collapse_node_7a7785233d95e11377dbbaa3365ca50c_0(int nd, const int * dims, const int * strides, int collapse[]) { //can we collapse dims[i] and dims[i-1] for(int i=nd-1;i>0;i--){ if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd collapse[i]=1; }else collapse[i]=0; } } static int callkernel_node_7a7785233d95e11377dbbaa3365ca50c_0(unsigned int numEls, const int d, const int * dims, const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, const float * i3_data, const int * i3_str, float * o0_data, const int * o0_str) { numEls = dims[0]*1; int local_dims[1]; int local_str[4][1]; int local_ostr[1][1]; int nd_collapse = 1; for(int i=0;i<1;i++){//init new dim local_dims[i]=dims[i]; } for(int i=0;i<1;i++){//init new strides local_str[0][i]=i0_str[i]; } for(int i=0;i<1;i++){//init new strides local_str[1][i]=i1_str[i]; } for(int i=0;i<1;i++){//init new 
strides local_str[2][i]=i2_str[i]; } for(int i=0;i<1;i++){//init new strides local_str[3][i]=i3_str[i]; } for(int i=0;i<1;i++){//init new strides local_ostr[0][i]=o0_str[i]; } for(int id=0;id<nd_collapse;id++){ bool all_broadcast=true; for(int input_id=0;input_id<4;input_id++){ if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } for(int input_id=0;input_id<1;input_id++){ if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } if(all_broadcast){ for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; for(int input_id=0;input_id<4;input_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_str[input_id][j-1]=local_str[input_id][j]; } } for(int output_id=0;output_id<1;output_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_ostr[output_id][j-1]=local_ostr[output_id][j]; } } nd_collapse--; id--; } } int nd_collapse_[1] = {1}; int nd_collapse_1[1] = {1}; can_collapse_node_7a7785233d95e11377dbbaa3365ca50c_0(nd_collapse, local_dims, local_str[1], nd_collapse_1); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_1[i]==0) nd_collapse_[i]=0; } int nd_collapse_3[1] = {1}; can_collapse_node_7a7785233d95e11377dbbaa3365ca50c_0(nd_collapse, local_dims, local_str[3], nd_collapse_3); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_3[i]==0) nd_collapse_[i]=0; } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[0][i-1]=local_str[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[0][j-1]=local_str[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[1][i-1]=local_str[1][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[1][j-1]=local_str[1][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[2][i-1]=local_str[2][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[2][j-1]=local_str[2][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[3][i-1]=local_str[3][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[3][j-1]=local_str[3][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_ostr[0][i-1]=local_ostr[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_ostr[0][j-1]=local_ostr[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_dims[i-1]*=local_dims[i];//set new dims for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; } } for(int i=1, end=nd_collapse;i<end;i++){ if(nd_collapse_[i]==1)nd_collapse--; } if(nd_collapse == 1 && local_str[1][nd_collapse-1]==1 && local_str[3][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1 ){nd_collapse=0;} if(numEls==0) return 0; switch (nd_collapse==0?0:min(1,nd_collapse)) { case 0: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, 
i0_data, i1_data, i2_data, i3_data, o0_data); //std::cerr << "calling callkernel returned\n"; CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7a7785233d95e11377dbbaa3365ca50c_0 Composite", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, o0_data)"); return -1; } return 0; } break; case 1: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_1<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], o0_data, local_ostr[0][0]); CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7a7785233d95e11377dbbaa3365ca50c_0 Composite", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7a7785233d95e11377dbbaa3365ca50c_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], o0_data, local_ostr[0][0])"); return -1; } return 0; } break; } return -2; } namespace { struct __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c { PyObject* __ERROR; PyObject* storage_V3; PyObject* storage_V5; PyObject* storage_V7; PyObject* storage_V9; PyObject* storage_V1; __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c() { // This is only somewhat safe because we: // 1) Are not a virtual class // 2) Do not use any virtual classes in the members // 3) Deal with mostly POD and pointers // If this changes, we would have to revise this, but for // now I am tired of chasing segfaults because // initialization code had an error and some pointer has // a junk value. 
memset(this, 0, sizeof(*this)); } ~__struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c(void) { cleanup(); } int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V1) { Py_XINCREF(storage_V3); Py_XINCREF(storage_V5); Py_XINCREF(storage_V7); Py_XINCREF(storage_V9); Py_XINCREF(storage_V1); this->storage_V3 = storage_V3; this->storage_V5 = storage_V5; this->storage_V7 = storage_V7; this->storage_V9 = storage_V9; this->storage_V1 = storage_V1; this->__ERROR = __ERROR; return 0; } void cleanup(void) { __label_1: double __DUMMY_1; __label_3: double __DUMMY_3; __label_5: double __DUMMY_5; __label_7: double __DUMMY_7; __label_9: double __DUMMY_9; __label_12: double __DUMMY_12; Py_XDECREF(this->storage_V3); Py_XDECREF(this->storage_V5); Py_XDECREF(this->storage_V7); Py_XDECREF(this->storage_V9); Py_XDECREF(this->storage_V1); } int run(void) { int __failure = 0; PyObject* py_V1; CudaNdarray * V1; PyObject* py_V3; CudaNdarray * V3; PyObject* py_V5; CudaNdarray * V5; PyObject* py_V7; CudaNdarray * V7; PyObject* py_V9; CudaNdarray * V9; { py_V1 = Py_None; {Py_XINCREF(py_V1);} V1 = NULL; { py_V3 = PyList_GET_ITEM(storage_V3, 0); {Py_XINCREF(py_V3);} assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V3)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); V3 = (CudaNdarray*)py_V3; //std::cerr << "c_extract " << V3 << '\n'; if (V3->nd != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 1", V3->nd); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V3)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V3)[0], 0); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V3 << "checking bcast 0 <" << V3->str<< ">\n"; //std::cerr << "c_extract " << V3->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V3)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V3)[0], 0); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << "bcast check 0 passed\n"; assert(V3); Py_INCREF(py_V3); } else if (py_V3 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_4;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract done " << V3 << '\n'; { py_V5 = PyList_GET_ITEM(storage_V5, 0); {Py_XINCREF(py_V5);} assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V5)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); V5 = (CudaNdarray*)py_V5; //std::cerr << "c_extract " << V5 << '\n'; if (V5->nd != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 1", V5->nd); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract " << V5 << " nd check passed\n"; assert(V5); Py_INCREF(py_V5); } else if (py_V5 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract done " << V5 << '\n'; { py_V7 = PyList_GET_ITEM(storage_V7, 0); {Py_XINCREF(py_V7);} assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V7)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); V7 = (CudaNdarray*)py_V7; //std::cerr << "c_extract " << V7 << '\n'; if (V7->nd != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 1", V7->nd); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V7)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V7)[0], 0); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V7 << "checking bcast 0 <" << V7->str<< ">\n"; //std::cerr << "c_extract " << V7->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V7)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V7)[0], 0); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << "bcast check 0 passed\n"; assert(V7); Py_INCREF(py_V7); } else if (py_V7 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract done " << V7 << '\n'; { py_V9 = PyList_GET_ITEM(storage_V9, 0); {Py_XINCREF(py_V9);} assert(py_V9->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V9)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); V9 = (CudaNdarray*)py_V9; //std::cerr << "c_extract " << V9 << '\n'; if (V9->nd != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 1", V9->nd); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << " nd check passed\n"; assert(V9); Py_INCREF(py_V9); } else if (py_V9 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract done " << V9 << '\n'; { // Op class GpuElemwise //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} START\n"; //standard elemwise size checks int dims[1] = {1}; int broadcasts_V3[1] = {1}; int broadcasts_V5[1] = {0}; int broadcasts_V7[1] = {1}; int broadcasts_V9[1] = {0}; //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V3\n"; if (1 != V3->nd) { PyErr_Format(PyExc_TypeError, "need 1 dims, not %i", V3->nd); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_11;}; } for (int i = 0; i< 1; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i]; if ((!(broadcasts_V3[i] && CudaNdarray_HOST_DIMS(V3)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V3)[i])) { //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V3 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 0 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V3)[i], dims[i] ); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V5\n"; if (1 != V5->nd) { PyErr_Format(PyExc_TypeError, "need 1 dims, not %i", V5->nd); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } for (int i = 0; i< 1; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i]; if ((!(broadcasts_V5[i] && CudaNdarray_HOST_DIMS(V5)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V5)[i])) { //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V5 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 1 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V5)[i], dims[i] ); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V7\n"; if (1 != V7->nd) { PyErr_Format(PyExc_TypeError, "need 1 dims, not %i", V7->nd); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } for (int i = 0; i< 1; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i]; if ((!(broadcasts_V7[i] && CudaNdarray_HOST_DIMS(V7)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V7)[i])) { //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V7 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 2 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V7)[i], dims[i] ); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V9\n"; if (1 != V9->nd) { PyErr_Format(PyExc_TypeError, "need 1 dims, not %i", V9->nd); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } for (int i = 0; i< 1; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V9)[i] : dims[i]; if ((!(broadcasts_V9[i] && CudaNdarray_HOST_DIMS(V9)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V9)[i])) { //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} checking input V9 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. 
Input" " 3 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V9)[i], dims[i] ); { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } Py_XDECREF(V1); V1 = V5; Py_INCREF(V1); for (int i = 0; (i< 1) && (V1); ++i) { if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i]) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Output dimension mis-match. Output" " 0 (indices start at 0), working inplace" " on input 1, has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V1)[i], dims[i] ); Py_DECREF(V1); V1 = NULL; { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } } //std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n"; //std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n"; { //new block so that failure gotos don't skip over variable initialization //std::cerr << "calling callkernel\n"; if (callkernel_node_7a7785233d95e11377dbbaa3365ca50c_0(1, 0, dims , CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3) , CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5) , CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7) , CudaNdarray_DEV_DATA(V9), CudaNdarray_HOST_STRIDES(V9) , CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1) )) { // error Py_DECREF(V1); V1 = NULL; { __failure = 11; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_11;}; } else // no error { } } //std::cerr << "C_CODE Composite{((i0 * i1) + (i2 * i3))} END\n"; __label_11: double __DUMMY_11; } __label_10: //std::cerr << "cleanup " << py_V9 << " " << V9 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); if (V9) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V9, (V9->ob_refcnt)); Py_XDECREF(V9); } //std::cerr << "cleanup done" << py_V9 << "\n"; {Py_XDECREF(py_V9);} double __DUMMY_10; } __label_8: //std::cerr << "cleanup " << py_V7 << " " << V7 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); if (V7) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt)); Py_XDECREF(V7); } //std::cerr << "cleanup done" << py_V7 << "\n"; {Py_XDECREF(py_V7);} double __DUMMY_8; } __label_6: //std::cerr << "cleanup " << py_V5 << " " << V5 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); if (V5) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt)); Py_XDECREF(V5); } //std::cerr << "cleanup done" << py_V5 << "\n"; {Py_XDECREF(py_V5);} double __DUMMY_6; } __label_4: //std::cerr << "cleanup " << py_V3 << " " << V3 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); if (V3) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt)); Py_XDECREF(V3); } //std::cerr << "cleanup done" << py_V3 << "\n"; {Py_XDECREF(py_V3);} double __DUMMY_4; } __label_2: if (!__failure) { //std::cerr << "sync\n"; if (NULL == V1) { // failure: sync None to storage Py_XDECREF(py_V1); py_V1 = Py_None; Py_INCREF(py_V1); } else { if (py_V1 != (PyObject*)V1) { Py_XDECREF(py_V1); py_V1 = (PyObject*)V1; Py_INCREF(py_V1); } assert(py_V1->ob_refcnt); } 
PyObject* old = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} PyList_SET_ITEM(storage_V1, 0, py_V1); {Py_XDECREF(old);} } //std::cerr << "cleanup " << py_V1 << " " << V1 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); if (V1) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt)); Py_XDECREF(V1); } //std::cerr << "cleanup done" << py_V1 << "\n"; {Py_XDECREF(py_V1);} double __DUMMY_2; } if (__failure) { // When there is a failure, this code puts the exception // in __ERROR. PyObject* err_type = NULL; PyObject* err_msg = NULL; PyObject* err_traceback = NULL; PyErr_Fetch(&err_type, &err_msg, &err_traceback); if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);} if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);} if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);} PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0); PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1); PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2); PyList_SET_ITEM(__ERROR, 0, err_type); PyList_SET_ITEM(__ERROR, 1, err_msg); PyList_SET_ITEM(__ERROR, 2, err_traceback); {Py_XDECREF(old_err_type);} {Py_XDECREF(old_err_msg);} {Py_XDECREF(old_err_traceback);} } // The failure code is returned to index what code block failed. return __failure; } }; } static int __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c_executor(__struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c* self) { return self->run(); } static void __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c_destructor(void* executor, void* self) { delete ((__struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c*)self); } ////////////////////// //// Functions ////////////////////// static PyObject * instantiate(PyObject * self, PyObject *argtuple) { assert(PyTuple_Check(argtuple)); if (6 != PyTuple_Size(argtuple)){ PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 6, got %i", (int)PyTuple_Size(argtuple)); return NULL; } __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c* struct_ptr = new __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c(); if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5) ) != 0) { delete struct_ptr; return NULL; } PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c_executor), struct_ptr, __struct_compiled_op_7a7785233d95e11377dbbaa3365ca50c_destructor); return thunk; } ////////////////////// //// Module init ////////////////////// static PyMethodDef MyMethods[] = { {"instantiate", instantiate, METH_VARARGS, "undocumented"} , {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init7a7785233d95e11377dbbaa3365ca50c(void){ (void) Py_InitModule("7a7785233d95e11377dbbaa3365ca50c", MyMethods); }
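The generated module above reduces to one fused elementwise operation: with the two (True,)-broadcast inputs treated as scalars, every output element is o[i] = i0 * i1[i] + i2 * i3[i], written in place over input 1 (destroy_map {0: [1]}). The hand-written CUDA sketch below covers only the contiguous case handled by the *_Ccontiguous kernel; the kernel and parameter names are illustrative and not part of the generated module.

```cuda
#include <cuda_runtime.h>

// Minimal equivalent of the generated *_Ccontiguous kernel:
// o[i] = s0 * x[i] + s2 * y[i], with s0 and s2 read from one-element
// device arrays to mirror the two broadcast (True,) inputs.
// In the generated code the output buffer aliases x (in place on input 1).
__global__ void composite_muladd(unsigned int numEls,
                                 const float *s0, const float *x,
                                 const float *s2, const float *y,
                                 float *o)
{
    const float a = s0[0];
    const float b = s2[0];
    // Grid-stride loop, as in the generated kernel, so any <grid, block> works.
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < numEls;
         i += blockDim.x * gridDim.x)
    {
        o[i] = a * x[i] + b * y[i];
    }
}
```

The generated version adds the bookkeeping visible above: broadcast and shape checks, dimension collapsing, a strided fallback kernel for non-contiguous inputs, and block/grid sizing capped at NUM_VECTOR_OP_THREADS_PER_BLOCK.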
7077173d7688c177b40cd79f0ea3b7508579434b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void histogram_kernel(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins){
  extern __shared__ unsigned int bins_s[]; //Shared Memory
  int thid = threadIdx.x;
  while(thid < num_bins){
    bins_s[thid] = 0u;
    thid += blockDim.x;
  }
  __syncthreads();

  //Histogram calculation
  unsigned int element = blockIdx.x * blockDim.x + threadIdx.x;
  while(element < num_elements){
    atomicAdd(&(bins_s[input[element]]), 1);
    element += blockDim.x * gridDim.x;
  }
  __syncthreads();

  //Global Memory
  thid = threadIdx.x;
  while(thid < num_bins){
    atomicAdd(&(bins[thid]), bins_s[thid]);
    thid += blockDim.x;
  }
}
7077173d7688c177b40cd79f0ea3b7508579434b.cu
#include "includes.h" __global__ void histogram_kernel(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins){ extern __shared__ unsigned int bins_s[]; //Shared Memory int thid = threadIdx.x; while(thid < num_bins){ bins_s[thid] = 0u; thid += blockDim.x; } __syncthreads(); //Histogram calculation unsigned int element = blockIdx.x * blockDim.x + threadIdx.x; while(element < num_elements){ atomicAdd(&(bins_s[input[element]]), 1); element += blockDim.x * gridDim.x; } __syncthreads(); //Global Memory thid = threadIdx.x; while(thid < num_bins){ atomicAdd(&(bins[thid]), bins_s[thid]); thid += blockDim.x; } }
1ac891f851889ccc40e6d1cc2d6e8a57b003f745.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "rdiv_float.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE * YSIZE;
            // Each buffer holds n floats.
            float *a = NULL;
            hipMalloc(&a, XSIZE * YSIZE * sizeof(float));
            float *b = NULL;
            hipMalloc(&b, XSIZE * YSIZE * sizeof(float));
            float *sum = NULL;
            hipMalloc(&sum, XSIZE * YSIZE * sizeof(float));
            // Round the domain up to a multiple of the block size so the grid covers it.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // establish the HIP context before any timing
            hipLaunchKernelGGL(rdiv_float, dim3(gridBlock), dim3(threadBlock), 0, 0, n, a, b, sum);
            hipDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(rdiv_float, dim3(gridBlock), dim3(threadBlock), 0, 0, n, a, b, sum);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(rdiv_float, dim3(gridBlock), dim3(threadBlock), 0, 0, n, a, b, sum);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release the buffers before the next configuration.
            hipFree(a);
            hipFree(b);
            hipFree(sum);
        }
    }
}
1ac891f851889ccc40e6d1cc2d6e8a57b003f745.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "rdiv_float.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE * YSIZE;
            // Each buffer holds n floats.
            float *a = NULL;
            cudaMalloc(&a, XSIZE * YSIZE * sizeof(float));
            float *b = NULL;
            cudaMalloc(&b, XSIZE * YSIZE * sizeof(float));
            float *sum = NULL;
            cudaMalloc(&sum, XSIZE * YSIZE * sizeof(float));
            // Round the domain up to a multiple of the block size so the grid covers it.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // establish the CUDA context before any timing
            rdiv_float<<<gridBlock, threadBlock>>>(n, a, b, sum);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                rdiv_float<<<gridBlock, threadBlock>>>(n, a, b, sum);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                rdiv_float<<<gridBlock, threadBlock>>>(n, a, b, sum);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release the buffers before the next configuration.
            cudaFree(a);
            cudaFree(b);
            cudaFree(sum);
        }
    }
}
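One caveat with the harness above: the timed loop issues 1000 asynchronous launches and reads `steady_clock` without a final device synchronization, so it largely measures launch overhead rather than kernel run time. Below is a sketch of the same measurement with CUDA events, which are recorded on the GPU stream and therefore bracket the actual execution; the `rdiv_float` declaration mirrors how the harness calls it, everything else is illustrative.

```cuda
#include <cuda_runtime.h>

// Declaration inferred from the harness's call site: rdiv_float(n, a, b, sum).
__global__ void rdiv_float(int n, float *a, float *b, float *sum);

// Returns the average time per launch in microseconds over `reps` repetitions.
float time_rdiv_float(dim3 gridBlock, dim3 threadBlock,
                      int n, float *d_a, float *d_b, float *d_sum, int reps)
{
    cudaEvent_t start_ev, stop_ev;
    cudaEventCreate(&start_ev);
    cudaEventCreate(&stop_ev);

    for (int warmup = 0; warmup < 10; ++warmup)        // same warm-up as the harness
        rdiv_float<<<gridBlock, threadBlock>>>(n, d_a, d_b, d_sum);

    cudaEventRecord(start_ev);
    for (int rep = 0; rep < reps; ++rep)
        rdiv_float<<<gridBlock, threadBlock>>>(n, d_a, d_b, d_sum);
    cudaEventRecord(stop_ev);
    cudaEventSynchronize(stop_ev);                     // wait for the last kernel to finish

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start_ev, stop_ev);
    cudaEventDestroy(start_ev);
    cudaEventDestroy(stop_ev);
    return ms * 1000.0f / reps;                        // ms -> us, averaged per launch
}
```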
1174841de66b68a3905061f1a6472b3f11bdf194.hip
// !!! This is a file automatically generated by hipify!!! /* This is machine problem 2, binning * The problem is that you have particles in a 3D domain * which is quantized into blocks or bins. You want to figure * out which block each particle belongs to. * Use the atomic functions that you learned about in lecture 3 * to implement the same functionality as the reference version on the cpu. * * FOR EXTRA CREDIT: * Write a version of your binning kernel that uses atomics hierarchically, * accumulating updates first into shared memory and then merging the results * from shared memory into the global memory. * As a hint, think about binning particles first into a coarse grid in a first kernel, * and then binning the particles from each coarse bin into the * final bins in a second kernel. */ /* * SUBMISSION INSTRUCTIONS * ========================= * * You can submit your entire working directory for this assignment * from any of the cluster machines by using our submit script. We want to be able * to just run "make" to compile your code. * The submit script bundles the entire current directory into * a submission. Thus, you use it by CDing to a the directory for your assignment, * and running: * * > cd *some directory* * > /usr/class/cs193g/bin/submit mp2 * * This will submit the current directory as your assignment. You can submit * as many times as you want, and we will use your last submission. */ #include <stdlib.h> #include <stdio.h> #include <ctime> #include <hip/hip_runtime.h> #include <assert.h> #include "mp2-util.h" // TODO enable this to print debugging information // const bool print_debug = true; const bool print_debug = false; event_pair timer; // the particle coordinates are already normalized (in the domain [0,1] ) // gridding provides the base 2 log of how finely the domain is subdivided // in each direction. So gridding.x == 6 means that the x-axis is subdivided // into 64 parts. (i.e. 2^(gridding.x) = number of bins on x axis) // Overall there cannot be more than 4B bins, so we can just concatenate the bin // indices into a single uint. __host__ __device__ unsigned int bin_index(float3 particle, int3 gridding) { unsigned int x_index = (unsigned int)(particle.x * (1 << gridding.x)); unsigned int y_index = (unsigned int)(particle.y * (1 << gridding.y)); unsigned int z_index = (unsigned int)(particle.z * (1 << gridding.z)); unsigned int index = 0; index |= z_index; index <<= gridding.y; index |= y_index; index <<= gridding.x; index |= x_index; return index; } void host_binning(float3 *particles, int *bins, int *bin_counters, int *overflow_flag, int3 gridding, int bin_size, int array_length) { for(int i=0;i<array_length;i++) { unsigned int bin = bin_index(particles[i],gridding); if(bin_counters[bin] < bin_size) { unsigned int offset = bin_counters[bin]; // let's not do the whole precrement / postcrement thing... bin_counters[bin]++; bins[bin*bin_size + offset] = i; } else { *overflow_flag = true; } } } bool cross_check_results(int * h_bins, int * h_bins_checker, int * h_bin_counters, int * h_bin_counters_checker, int * h_particles_binids_checker, int num_particles, int num_bins, int bin_size) { int error = 0; for(int i=0;i<num_bins;i++) { if(h_bin_counters[i] != h_bin_counters_checker[i]) { if(print_debug) fprintf(stderr,"mismatch! 
bin %d: cuda:%d host:%d particles \n",i,h_bin_counters[i],h_bin_counters_checker[i]); error = 1; } for(int j=0; j<bin_size;j++) { // record which these particles went into bin i in the reference version if(h_bins_checker[i*bin_size+j] != -1) { h_particles_binids_checker[h_bins_checker[i*bin_size+j]] = i; } } for(int j=0; j<bin_size;j++) { if(h_bins_checker[i*bin_size+j] != -1) { if(h_particles_binids_checker[h_bins[i*bin_size+j]] != i) { error = 1; } } } } if(error) { printf("Output of CUDA version and normal version didn't match! \n"); } else { printf("Worked! CUDA and reference output match. \n"); } return error; } #define CHECK(call) { \ hipError_t err = hipSuccess; \ if ( (err = (call)) != hipSuccess) { \ fprintf(stderr, "Got error %s at %s:%d\n", hipGetErrorString(err), __FILE__, __LINE__); \ exit(1); \ }\ } __global__ void device_binning_kernel(float3 *particles, int *bins, int *bin_counters, int3 gridding, int bin_size, int array_length) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if(gid >= array_length) return; { unsigned int bin = bin_index(particles[gid],gridding); if(bin_counters[bin] < bin_size) { // let's not do the whole precrement / postcrement thing... unsigned int offset = atomicAdd(&bin_counters[bin],1); bins[bin*bin_size + offset] = gid; } } } template <typename T> __global__ void initialize(T *array,T value, unsigned int array_length) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if(gid < array_length) { array[gid] = value; } } void device_binning(float3 * h_particles, int * h_bins, int * h_bin_counters, int3 gridding, int num_particles, int num_bins, int bin_size) { // TODO: your implementation here // How do I call a templated kernel? It's actually easy... // int* array; // int value = 0; // int array_length = 0; // initialize<<<griddim,blockdim>>>(array, value, array_length); // The compiler will figure out the types of your arguments and codegen a implementation for each type you use. 
float3 * d_particles = nullptr; CHECK(hipMalloc((void**) &d_particles, num_particles * sizeof(float3))); CHECK(hipMemcpy(d_particles, h_particles, num_particles * sizeof(float3), hipMemcpyHostToDevice)); dim3 THRD_SZ(512); dim3 GRID_SZ((num_bins * bin_size + THRD_SZ.x-1)/THRD_SZ.x); int * d_bins = nullptr; CHECK(hipMalloc((void**) &d_bins, num_bins * bin_size * sizeof(int))); //initialize the d_bins to -1 hipLaunchKernelGGL(( initialize), dim3(GRID_SZ),dim3(THRD_SZ), 0, 0, d_bins, -1, num_bins * bin_size); int * d_bin_counters = nullptr; CHECK(hipMalloc((void**) &d_bin_counters, num_bins * sizeof(int))); THRD_SZ = (512); GRID_SZ = ((num_bins + THRD_SZ.x-1)/THRD_SZ.x); //initialize the d_bin_counters to 0 hipLaunchKernelGGL(( initialize), dim3(GRID_SZ),dim3(THRD_SZ), 0, 0, d_bin_counters, 0, num_bins); THRD_SZ = (512); GRID_SZ = ((num_particles + THRD_SZ.x-1)/THRD_SZ.x); hipLaunchKernelGGL(( device_binning_kernel), dim3(GRID_SZ),dim3(THRD_SZ), 0, 0, d_particles, d_bins, d_bin_counters, gridding, bin_size, num_particles); CHECK(hipMemcpy(h_bins, d_bins, num_bins * bin_size * sizeof(int), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(h_bin_counters, d_bin_counters, num_bins * sizeof(int), hipMemcpyDeviceToHost)); //Free allocated memory in the device CHECK(hipFree(d_particles)); CHECK(hipFree(d_bins)); CHECK(hipFree(d_bin_counters)); } int main(void) { // create arrays of 8M elements int num_particles = 8*1024*1024; int log_bpd = 6; //int log_bpd = 0; int bins_per_dim = 1 << log_bpd; unsigned int num_bins = bins_per_dim*bins_per_dim*bins_per_dim; // extra space to account for load imbalance to prevent frequent aborts due to bin overflow int bin_size = num_particles/num_bins * 3; int3 gridding = make_int3(log_bpd,log_bpd,log_bpd); float3 *h_particles = 0; int *h_bins = 0; int *h_bin_counters = 0; int *h_bins_checker = 0; float3 *h_particles_checker = 0; int *h_bin_counters_checker = 0; int *h_particles_binids_checker = 0; int h_overflow_flag_checker = 0; // malloc host array h_particles = (float3*)malloc(num_particles * sizeof(float3)); h_bins = (int*)malloc(num_bins * bin_size * sizeof(int)); h_bin_counters = (int*)malloc(num_bins * sizeof(int)); h_particles_checker = (float3*)malloc(num_particles * sizeof(float3)); h_bins_checker = (int*)malloc(num_bins * bin_size * sizeof(int)); h_particles_binids_checker = (int*)malloc(num_bins * bin_size * sizeof(int)); h_bin_counters_checker = (int*)malloc(num_bins * sizeof(int)); // if either memory allocation failed, report an error message if(h_particles == 0 || h_bins == 0 || h_bin_counters == 0 || h_bins_checker == 0 || h_bin_counters_checker == 0 || h_particles_binids_checker == 0) { printf("couldn't allocate memory\n"); exit(1); } // generate random input // initialize srand(13); for(int i=0;i< num_particles;i++) { h_particles[i] = h_particles_checker[i] = make_float3((float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX); } for(int i=0;i<num_bins;i++) { h_bin_counters[i] = h_bin_counters_checker[i] = 0; } for(int i=0;i<num_bins*bin_size;i++) { h_bins[i] = h_bins_checker[i] = h_particles_binids_checker[i] = -1; } device_binning(h_particles, h_bins, h_bin_counters, gridding, num_particles, num_bins, bin_size); // generate reference output start_timer(&timer); host_binning(h_particles_checker, h_bins_checker, h_bin_counters_checker, &h_overflow_flag_checker, gridding, bin_size, num_particles); stop_timer(&timer,"cpu binning"); if(h_overflow_flag_checker) { printf("one of the bins overflowed!\n"); exit(1); } // check 
CUDA output versus reference output cross_check_results(h_bins, h_bins_checker, h_bin_counters, h_bin_counters_checker, h_particles_binids_checker, num_particles, num_bins, bin_size); // deallocate memory free(h_particles); free(h_bins); free(h_bin_counters); free(h_particles_checker); free(h_bins_checker); free(h_particles_binids_checker); free(h_bin_counters_checker); return 0; }
1174841de66b68a3905061f1a6472b3f11bdf194.cu
/* This is machine problem 2, binning * The problem is that you have particles in a 3D domain * which is quantized into blocks or bins. You want to figure * out which block each particle belongs to. * Use the atomic functions that you learned about in lecture 3 * to implement the same functionality as the reference version on the cpu. * * FOR EXTRA CREDIT: * Write a version of your binning kernel that uses atomics hierarchically, * accumulating updates first into shared memory and then merging the results * from shared memory into the global memory. * As a hint, think about binning particles first into a coarse grid in a first kernel, * and then binning the particles from each coarse bin into the * final bins in a second kernel. */ /* * SUBMISSION INSTRUCTIONS * ========================= * * You can submit your entire working directory for this assignment * from any of the cluster machines by using our submit script. We want to be able * to just run "make" to compile your code. * The submit script bundles the entire current directory into * a submission. Thus, you use it by CDing to a the directory for your assignment, * and running: * * > cd *some directory* * > /usr/class/cs193g/bin/submit mp2 * * This will submit the current directory as your assignment. You can submit * as many times as you want, and we will use your last submission. */ #include <stdlib.h> #include <stdio.h> #include <ctime> #include <cuda.h> #include <assert.h> #include "mp2-util.h" // TODO enable this to print debugging information // const bool print_debug = true; const bool print_debug = false; event_pair timer; // the particle coordinates are already normalized (in the domain [0,1] ) // gridding provides the base 2 log of how finely the domain is subdivided // in each direction. So gridding.x == 6 means that the x-axis is subdivided // into 64 parts. (i.e. 2^(gridding.x) = number of bins on x axis) // Overall there cannot be more than 4B bins, so we can just concatenate the bin // indices into a single uint. __host__ __device__ unsigned int bin_index(float3 particle, int3 gridding) { unsigned int x_index = (unsigned int)(particle.x * (1 << gridding.x)); unsigned int y_index = (unsigned int)(particle.y * (1 << gridding.y)); unsigned int z_index = (unsigned int)(particle.z * (1 << gridding.z)); unsigned int index = 0; index |= z_index; index <<= gridding.y; index |= y_index; index <<= gridding.x; index |= x_index; return index; } void host_binning(float3 *particles, int *bins, int *bin_counters, int *overflow_flag, int3 gridding, int bin_size, int array_length) { for(int i=0;i<array_length;i++) { unsigned int bin = bin_index(particles[i],gridding); if(bin_counters[bin] < bin_size) { unsigned int offset = bin_counters[bin]; // let's not do the whole precrement / postcrement thing... bin_counters[bin]++; bins[bin*bin_size + offset] = i; } else { *overflow_flag = true; } } } bool cross_check_results(int * h_bins, int * h_bins_checker, int * h_bin_counters, int * h_bin_counters_checker, int * h_particles_binids_checker, int num_particles, int num_bins, int bin_size) { int error = 0; for(int i=0;i<num_bins;i++) { if(h_bin_counters[i] != h_bin_counters_checker[i]) { if(print_debug) fprintf(stderr,"mismatch! 
bin %d: cuda:%d host:%d particles \n",i,h_bin_counters[i],h_bin_counters_checker[i]); error = 1; } for(int j=0; j<bin_size;j++) { // record which these particles went into bin i in the reference version if(h_bins_checker[i*bin_size+j] != -1) { h_particles_binids_checker[h_bins_checker[i*bin_size+j]] = i; } } for(int j=0; j<bin_size;j++) { if(h_bins_checker[i*bin_size+j] != -1) { if(h_particles_binids_checker[h_bins[i*bin_size+j]] != i) { error = 1; } } } } if(error) { printf("Output of CUDA version and normal version didn't match! \n"); } else { printf("Worked! CUDA and reference output match. \n"); } return error; } #define CHECK(call) { \ cudaError_t err = cudaSuccess; \ if ( (err = (call)) != cudaSuccess) { \ fprintf(stderr, "Got error %s at %s:%d\n", cudaGetErrorString(err), __FILE__, __LINE__); \ exit(1); \ }\ } __global__ void device_binning_kernel(float3 *particles, int *bins, int *bin_counters, int3 gridding, int bin_size, int array_length) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if(gid >= array_length) return; { unsigned int bin = bin_index(particles[gid],gridding); if(bin_counters[bin] < bin_size) { // let's not do the whole precrement / postcrement thing... unsigned int offset = atomicAdd(&bin_counters[bin],1); bins[bin*bin_size + offset] = gid; } } } template <typename T> __global__ void initialize(T *array,T value, unsigned int array_length) { int gid = blockIdx.x * blockDim.x + threadIdx.x; if(gid < array_length) { array[gid] = value; } } void device_binning(float3 * h_particles, int * h_bins, int * h_bin_counters, int3 gridding, int num_particles, int num_bins, int bin_size) { // TODO: your implementation here // How do I call a templated kernel? It's actually easy... // int* array; // int value = 0; // int array_length = 0; // initialize<<<griddim,blockdim>>>(array, value, array_length); // The compiler will figure out the types of your arguments and codegen a implementation for each type you use. 
float3 * d_particles = nullptr; CHECK(cudaMalloc((void**) &d_particles, num_particles * sizeof(float3))); CHECK(cudaMemcpy(d_particles, h_particles, num_particles * sizeof(float3), cudaMemcpyHostToDevice)); dim3 THRD_SZ(512); dim3 GRID_SZ((num_bins * bin_size + THRD_SZ.x-1)/THRD_SZ.x); int * d_bins = nullptr; CHECK(cudaMalloc((void**) &d_bins, num_bins * bin_size * sizeof(int))); //initialize the d_bins to -1 initialize<<<GRID_SZ,THRD_SZ>>>(d_bins, -1, num_bins * bin_size); int * d_bin_counters = nullptr; CHECK(cudaMalloc((void**) &d_bin_counters, num_bins * sizeof(int))); THRD_SZ = (512); GRID_SZ = ((num_bins + THRD_SZ.x-1)/THRD_SZ.x); //initialize the d_bin_counters to 0 initialize<<<GRID_SZ,THRD_SZ>>>(d_bin_counters, 0, num_bins); THRD_SZ = (512); GRID_SZ = ((num_particles + THRD_SZ.x-1)/THRD_SZ.x); device_binning_kernel<<<GRID_SZ,THRD_SZ>>>(d_particles, d_bins, d_bin_counters, gridding, bin_size, num_particles); CHECK(cudaMemcpy(h_bins, d_bins, num_bins * bin_size * sizeof(int), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(h_bin_counters, d_bin_counters, num_bins * sizeof(int), cudaMemcpyDeviceToHost)); //Free allocated memory in the device CHECK(cudaFree(d_particles)); CHECK(cudaFree(d_bins)); CHECK(cudaFree(d_bin_counters)); } int main(void) { // create arrays of 8M elements int num_particles = 8*1024*1024; int log_bpd = 6; //int log_bpd = 0; int bins_per_dim = 1 << log_bpd; unsigned int num_bins = bins_per_dim*bins_per_dim*bins_per_dim; // extra space to account for load imbalance to prevent frequent aborts due to bin overflow int bin_size = num_particles/num_bins * 3; int3 gridding = make_int3(log_bpd,log_bpd,log_bpd); float3 *h_particles = 0; int *h_bins = 0; int *h_bin_counters = 0; int *h_bins_checker = 0; float3 *h_particles_checker = 0; int *h_bin_counters_checker = 0; int *h_particles_binids_checker = 0; int h_overflow_flag_checker = 0; // malloc host array h_particles = (float3*)malloc(num_particles * sizeof(float3)); h_bins = (int*)malloc(num_bins * bin_size * sizeof(int)); h_bin_counters = (int*)malloc(num_bins * sizeof(int)); h_particles_checker = (float3*)malloc(num_particles * sizeof(float3)); h_bins_checker = (int*)malloc(num_bins * bin_size * sizeof(int)); h_particles_binids_checker = (int*)malloc(num_bins * bin_size * sizeof(int)); h_bin_counters_checker = (int*)malloc(num_bins * sizeof(int)); // if either memory allocation failed, report an error message if(h_particles == 0 || h_bins == 0 || h_bin_counters == 0 || h_bins_checker == 0 || h_bin_counters_checker == 0 || h_particles_binids_checker == 0) { printf("couldn't allocate memory\n"); exit(1); } // generate random input // initialize srand(13); for(int i=0;i< num_particles;i++) { h_particles[i] = h_particles_checker[i] = make_float3((float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX); } for(int i=0;i<num_bins;i++) { h_bin_counters[i] = h_bin_counters_checker[i] = 0; } for(int i=0;i<num_bins*bin_size;i++) { h_bins[i] = h_bins_checker[i] = h_particles_binids_checker[i] = -1; } device_binning(h_particles, h_bins, h_bin_counters, gridding, num_particles, num_bins, bin_size); // generate reference output start_timer(&timer); host_binning(h_particles_checker, h_bins_checker, h_bin_counters_checker, &h_overflow_flag_checker, gridding, bin_size, num_particles); stop_timer(&timer,"cpu binning"); if(h_overflow_flag_checker) { printf("one of the bins overflowed!\n"); exit(1); } // check CUDA output versus reference output cross_check_results(h_bins, h_bins_checker, h_bin_counters, 
h_bin_counters_checker, h_particles_binids_checker, num_particles, num_bins, bin_size); // deallocate memory free(h_particles); free(h_bins); free(h_bin_counters); free(h_particles_checker); free(h_bins_checker); free(h_particles_binids_checker); free(h_bin_counters_checker); return 0; }
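In `device_binning_kernel` above, the capacity test `bin_counters[bin] < bin_size` happens before the `atomicAdd`, so several threads can pass it at once and the offsets they then obtain may land at or beyond `bin_size`, i.e. outside the bin. A race-free sketch claims the slot first and checks the returned offset instead; `bin_index` is the `__host__ __device__` helper from the file, while the `overflow_flag` parameter is an illustrative addition mirroring the host reference (the original kernel takes none).

```cuda
// Overflow-safe variant of device_binning_kernel (sketch).
__global__ void device_binning_kernel_safe(float3 *particles, int *bins, int *bin_counters,
                                           int *overflow_flag, int3 gridding,
                                           int bin_size, int array_length)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= array_length) return;

    unsigned int bin = bin_index(particles[gid], gridding);

    // Claim a slot first: atomicAdd hands out unique, consecutive offsets per bin.
    int offset = atomicAdd(&bin_counters[bin], 1);
    if (offset < bin_size) {
        bins[bin * bin_size + offset] = gid;
    } else {
        // Bin is full: undo the claim so the counter settles at bin_size,
        // and record the overflow like the host reference does.
        atomicSub(&bin_counters[bin], 1);
        *overflow_flag = 1;
    }
}
```

The extra-credit hierarchical approach described in the header comment would accumulate counts for a coarse grid in shared memory first (much like the histogram kernel earlier in this collection) and merge those partial counters into global memory in a second pass.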
966293189901e5e548c56b10844a45dd713687c0.hip
// !!! This is a file automatically generated by hipify!!! /** * block loading rho calculation. should be much faster * system('nvcc -ptx citydist_rho4.cu') * iA is multiple of chunk (16) */ #include <hip/hip_runtime.h> // #include "rocblas.h" #include <math.h> #define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val) #define MIN(A,B) ((A)<(B)) ? (A) : (B) #define MAX(A,B) ((A)>(B)) ? (A) : (B) #define NTHREADS 128 #define NC (1+6*2) // #define NC (9) #define CHUNK 16 #define SINGLE_INF (3.402E+38) /** Main entry point. * Works out where the current thread should read/write to global memory * and calls doIterations to do the actual work. * Step through one B at a time */ __global__ void eucldist_sorted_rho_exp(float const *A, float *D, int const nA, int const nneigh, int const nC, float const dc){ int iA = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK; int tx = threadIdx.x; __shared__ float sA[NC][CHUNK]; __shared__ float rho1[NTHREADS][CHUNK]; // cache A if (tx < nC){ //use tx as iC for (int i=0; i<CHUNK; ++i){ if (iA+i < nA){ sA[tx][i] = A[tx + (iA+i)*nC]; }else{ sA[tx][i] = SINGLE_INF; } } } for (int i=0; i<CHUNK; ++i) rho1[tx][i] = 0.0f; __syncthreads(); // fill in the shared memory A float dc2 = dc*dc; int iB_min = MAX(iA - nneigh, 0); int iB_max = MIN(iA + nneigh + CHUNK - 1, nA-1); int iB = iB_min + tx; //MAX(tx, iB_min); // tx is index for B while (iB <= iB_max){ float dist[CHUNK]; // calculate distance to B for (int i=0; i<CHUNK; ++i) dist[i] = 0.0f; for (int iC=0; iC<nC; ++iC){ float Btemp = A[iC + iB*nC]; for (int i=0; i<CHUNK; ++i){ float temp = Btemp - sA[iC][i]; dist[i] += temp * temp; } } for (int i=0; i<CHUNK; ++i){ int dab = ABS(iA+i-iB); if (dab<=nneigh){ if (iA+i < nA && iA+i != iB){ rho1[tx][i] += expf(-1*dist[i]/dc2); } } } iB += blockDim.x; } // while // final count __syncthreads(); // if (tx < CHUNK) D[iA+tx] = rho1[tx]; if (tx < CHUNK){ float sum = 0.0f; for (int tx1=0; tx1<blockDim.x; ++tx1) sum += rho1[tx1][tx]; if (iA+tx<nA) D[iA+tx] = sum; } } // func
966293189901e5e548c56b10844a45dd713687c0.cu
/** * block loading rho calculation. should be much faster * system('nvcc -ptx citydist_rho4.cu') * iA is multiple of chunk (16) */ #include <cuda_runtime.h> // #include "cublas_v2.h" #include <math.h> #define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val) #define MIN(A,B) ((A)<(B)) ? (A) : (B) #define MAX(A,B) ((A)>(B)) ? (A) : (B) #define NTHREADS 128 #define NC (1+6*2) // #define NC (9) #define CHUNK 16 #define SINGLE_INF (3.402E+38) /** Main entry point. * Works out where the current thread should read/write to global memory * and calls doIterations to do the actual work. * Step through one B at a time */ __global__ void eucldist_sorted_rho_exp(float const *A, float *D, int const nA, int const nneigh, int const nC, float const dc){ int iA = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK; int tx = threadIdx.x; __shared__ float sA[NC][CHUNK]; __shared__ float rho1[NTHREADS][CHUNK]; // cache A if (tx < nC){ //use tx as iC for (int i=0; i<CHUNK; ++i){ if (iA+i < nA){ sA[tx][i] = A[tx + (iA+i)*nC]; }else{ sA[tx][i] = SINGLE_INF; } } } for (int i=0; i<CHUNK; ++i) rho1[tx][i] = 0.0f; __syncthreads(); // fill in the shared memory A float dc2 = dc*dc; int iB_min = MAX(iA - nneigh, 0); int iB_max = MIN(iA + nneigh + CHUNK - 1, nA-1); int iB = iB_min + tx; //MAX(tx, iB_min); // tx is index for B while (iB <= iB_max){ float dist[CHUNK]; // calculate distance to B for (int i=0; i<CHUNK; ++i) dist[i] = 0.0f; for (int iC=0; iC<nC; ++iC){ float Btemp = A[iC + iB*nC]; for (int i=0; i<CHUNK; ++i){ float temp = Btemp - sA[iC][i]; dist[i] += temp * temp; } } for (int i=0; i<CHUNK; ++i){ int dab = ABS(iA+i-iB); if (dab<=nneigh){ if (iA+i < nA && iA+i != iB){ rho1[tx][i] += expf(-1*dist[i]/dc2); } } } iB += blockDim.x; } // while // final count __syncthreads(); // if (tx < CHUNK) D[iA+tx] = rho1[tx]; if (tx < CHUNK){ float sum = 0.0f; for (int tx1=0; tx1<blockDim.x; ++tx1) sum += rho1[tx1][tx]; if (iA+tx<nA) D[iA+tx] = sum; } } // func
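Both variants of this file are meant to be compiled to PTX and launched from MATLAB (note the system('nvcc -ptx citydist_rho4.cu') hint in the header comment), so no host launcher appears in the source. For reference, the sketch below shows a minimal stand-alone launcher consistent with the kernel's own indexing; the grid split follows iA = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK with the fixed 128-thread block, while the helper name and the trailing synchronization are assumptions.

#include <cuda_runtime.h>

// Hedged sketch of a host launcher for eucldist_sorted_rho_exp (assumed to be
// declared earlier in the same translation unit). d_A holds nA points with nC
// coordinates each, d_D receives one density value per point. Assumes nA > 0.
void launch_rho(const float *d_A, float *d_D, int nA, int nneigh, int nC, float dc)
{
    const int CHUNK = 16;       // must match the kernel's CHUNK
    const int NTHREADS = 128;   // must match the kernel's NTHREADS
    int num_blocks = (nA + CHUNK - 1) / CHUNK;

    // The kernel flattens a 2D grid, so any gridDim.x * gridDim.y >= num_blocks works;
    // spill into y to stay under the 65535 per-dimension limit of older GPUs.
    int gx = num_blocks < 65535 ? num_blocks : 65535;
    int gy = (num_blocks + gx - 1) / gx;

    eucldist_sorted_rho_exp<<<dim3(gx, gy), dim3(NTHREADS)>>>(d_A, d_D, nA, nneigh, nC, dc);
    cudaDeviceSynchronize();
}

Launching a few blocks past ceil(nA / CHUNK) is harmless because the kernel clamps iB to nA - 1 and guards its final store with iA + tx < nA.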
d25de05d31319a5a15e42d4574ee92bddc208d6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/TensorTransformations.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <cstddef> #include <vector> namespace at { namespace native { #define AT_APPLY_THREADS_PER_BLOCK 32 * 16 #define AT_APPLY_BLOCKS_PER_SM 4 template <typename scalar_t, typename IndexType> #if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__ __launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM) #endif __global__ void kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info, cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info, IndexType N, int flip_dim, IndexType total_dims) { for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) { IndexType dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0]; } else { // flip last dim IndexType i = total_dims - 1; dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]); } out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index]; } } template <typename scalar_t> __global__ void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } int64_t cur_indices = linear_index, rem = 0, dst_offset = 0; for (int64_t i = 0; i < total_dims; i++) { int64_t temp = cur_indices; cur_indices = cur_indices / strides_contiguous[i]; rem = temp - cur_indices * strides_contiguous[i]; // flip the indices if it is in flip_dims for (int64_t j = 0; j < flip_dims_size; j++) { if (i == flip_dims[j]) { cur_indices = shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * strides[i]; cur_indices = rem; } out_tensor[linear_index] = in_tensor[dst_offset]; } // Flip tensor given a list of dims Tensor flip_cuda(const Tensor& self, IntList dims) { auto in_tensor = self; const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel(); flip_check_errors(total_dims, flip_dims_size, dims); int64_t block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } auto flip_dims = dims.vec(); wrap_all_dims(flip_dims, total_dims); // use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) { AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor); auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor); int flip_dim = in_tensor_info.collapseDims(flip_dims[0]); out_tensor_info.collapseDims(flip_dims[0]); hipLaunchKernelGGL(( kernel_pointwise_flip_apply2<scalar_t, int64_t>) , dim3(dim_grid), dim3(dim_block), 0, 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), in_tensor_info, out_tensor_info, N, flip_dim, total_dims); }); return out_tensor; } auto flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}); auto shape = in_tensor.sizes().vec(); auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())}); auto strides = in_tensor.strides().vec(); auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())}); // stride_contiguous is the stride of non-contiguous tensor after calling contiguous(), // it is used to compute indices for each element in non-contiguous tensor Tensor stride_contiguous = at::zeros({total_dims}, kLong); int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>(); for (int64_t i = total_dims - 1; i >= 0; i--) { if (i == total_dims - 1) { stride_contiguous_d[i] = 1; } else { stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1]; } } AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { hipLaunchKernelGGL(( flip_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size, strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims); }); return out_tensor; } template <typename scalar_t> __global__ void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t roll_dim, int64_t start, int64_t size, int64_t stride, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } // roll dim idx is the index of linear_index along the rolling dimension. int64_t roll_dim_idx = linear_index % (stride * size) / stride; // index into the source data to find appropriate value. int64_t source_idx = 0; if( roll_dim_idx >= (size - start) ) { source_idx = linear_index - ((size - start) * stride); } else { source_idx = linear_index + (start * stride); } out_tensor[linear_index] = in_tensor[source_idx]; } // Roll a tensor along a dimension Tensor roll_cuda(const Tensor& self, IntList shifts, IntList dims) { if (dims.size() != 1 || shifts.size() != 1) { return roll_common(self, shifts, dims); } auto in_tensor = self; if(!self.is_contiguous()) { in_tensor = self.contiguous(); } auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } const int64_t N = in_tensor.numel(); const int64_t dim = dims[0]; const int64_t size = in_tensor.size(dim); int64_t start = (size - shifts[0]) % size; // Behavior of % is different in C++ vs Python for negative numbers. This // corrects the difference. if( start < 0 ) start = start + size; dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; AT_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid"); auto total_dims = in_tensor.dim(); AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "roll_cuda", [&] { hipLaunchKernelGGL(( roll_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, dim, start, size, in_tensor.stride(dim), total_dims); }); return out_tensor; } }} // namespace at::native
d25de05d31319a5a15e42d4574ee92bddc208d6d.cu
#include <ATen/native/TensorTransformations.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <cstddef> #include <vector> namespace at { namespace native { #define AT_APPLY_THREADS_PER_BLOCK 32 * 16 #define AT_APPLY_BLOCKS_PER_SM 4 template <typename scalar_t, typename IndexType> #if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__ __launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM) #endif __global__ void kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info, cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info, IndexType N, int flip_dim, IndexType total_dims) { for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) { IndexType dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0]; } else { // flip last dim IndexType i = total_dims - 1; dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]); } out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index]; } } template <typename scalar_t> __global__ void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } int64_t cur_indices = linear_index, rem = 0, dst_offset = 0; for (int64_t i = 0; i < total_dims; i++) { int64_t temp = cur_indices; cur_indices = cur_indices / strides_contiguous[i]; rem = temp - cur_indices * strides_contiguous[i]; // flip the indices if it is in flip_dims for (int64_t j = 0; j < flip_dims_size; j++) { if (i == flip_dims[j]) { cur_indices = shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * strides[i]; cur_indices = rem; } out_tensor[linear_index] = in_tensor[dst_offset]; } // Flip tensor given a list of dims Tensor flip_cuda(const Tensor& self, IntList dims) { auto in_tensor = self; const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel(); flip_check_errors(total_dims, flip_dims_size, dims); int64_t block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } auto flip_dims = dims.vec(); wrap_all_dims(flip_dims, total_dims); // use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) { AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor); auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor); int flip_dim = in_tensor_info.collapseDims(flip_dims[0]); out_tensor_info.collapseDims(flip_dims[0]); kernel_pointwise_flip_apply2<scalar_t, int64_t> <<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( in_tensor_info, out_tensor_info, N, flip_dim, total_dims); }); return out_tensor; } auto 
flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}); auto shape = in_tensor.sizes().vec(); auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())}); auto strides = in_tensor.strides().vec(); auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())}); // stride_contiguous is the stride of non-contiguous tensor after calling contiguous(), // it is used to compute indices for each element in non-contiguous tensor Tensor stride_contiguous = at::zeros({total_dims}, kLong); int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>(); for (int64_t i = total_dims - 1; i >= 0; i--) { if (i == total_dims - 1) { stride_contiguous_d[i] = 1; } else { stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1]; } } AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { flip_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size, strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims); }); return out_tensor; } template <typename scalar_t> __global__ void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t roll_dim, int64_t start, int64_t size, int64_t stride, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } // roll dim idx is the index of linear_index along the rolling dimension. int64_t roll_dim_idx = linear_index % (stride * size) / stride; // index into the source data to find appropriate value. int64_t source_idx = 0; if( roll_dim_idx >= (size - start) ) { source_idx = linear_index - ((size - start) * stride); } else { source_idx = linear_index + (start * stride); } out_tensor[linear_index] = in_tensor[source_idx]; } // Roll a tensor along a dimension Tensor roll_cuda(const Tensor& self, IntList shifts, IntList dims) { if (dims.size() != 1 || shifts.size() != 1) { return roll_common(self, shifts, dims); } auto in_tensor = self; if(!self.is_contiguous()) { in_tensor = self.contiguous(); } auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } const int64_t N = in_tensor.numel(); const int64_t dim = dims[0]; const int64_t size = in_tensor.size(dim); int64_t start = (size - shifts[0]) % size; // Behavior of % is different in C++ vs Python for negative numbers. This // corrects the difference. if( start < 0 ) start = start + size; dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; AT_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid"); auto total_dims = in_tensor.dim(); AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "roll_cuda", [&] { roll_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, dim, start, size, in_tensor.stride(dim), total_dims); }); return out_tensor; } }} // namespace at::native
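flip_cuda_kernel recovers each output element's multi-dimensional index by repeatedly dividing the linear index by the contiguous strides, mirrors the coordinate along every flipped dimension, and rebuilds the source offset from the tensor's real strides. The host-side sketch below re-derives that offset for a contiguous tensor so the arithmetic can be checked in isolation; it is an illustration written for this note, not part of the ATen sources.

#include <cstdint>
#include <vector>
using std::int64_t;

// Hedged host-side mirror of flip_cuda_kernel's offset computation for a contiguous
// tensor: returns the input offset that output element `linear_index` is read from.
int64_t flipped_source_offset(int64_t linear_index,
                              const std::vector<int64_t>& shape,
                              const std::vector<int64_t>& flip_dims)
{
    const int64_t total_dims = static_cast<int64_t>(shape.size());

    // Row-major (contiguous) strides, as in the stride_contiguous loop above
    // (which additionally clamps each extent to at least 1 for zero-sized dims).
    std::vector<int64_t> stride(total_dims);
    for (int64_t i = total_dims - 1; i >= 0; --i)
        stride[i] = (i == total_dims - 1) ? 1 : shape[i + 1] * stride[i + 1];

    int64_t cur = linear_index, offset = 0;
    for (int64_t i = 0; i < total_dims; ++i) {
        int64_t idx = cur / stride[i];
        int64_t rem = cur - idx * stride[i];
        for (int64_t d : flip_dims)
            if (d == i) idx = shape[i] - 1 - idx;   // mirror this coordinate
        offset += idx * stride[i];                  // contiguous: same strides both ways
        cur = rem;
    }
    return offset;
}

For a contiguous 2x3 tensor flipped along dim 0, flipped_source_offset(0, {2, 3}, {0}) returns 3, i.e. output element (0, 0) is read from input element (1, 0).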
02ff9580a02c277687f72083e61041dba08769d5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "BatchData.h" #include "gdeflate.h" #include "nvcomp/gdeflate.h" #ifdef ENABLE_GDEFLATE #define CHECK_NVCOMP_STATUS(status) \ if ((status) != nvcompSuccess) \ throw std::runtime_error("Failed to decompress data"); #else #define CHECK_NVCOMP_STATUS(status) \ if ((status) != nvcompSuccess) \ throw std::runtime_error("nvcomp not configured with gdeflate support"); #endif // Benchmark performance from the binary data file fname static void run_example(const std::vector<std::vector<char>>& data) { size_t total_bytes = 0; for (const std::vector<char>& part : data) { total_bytes += part.size(); } std::cout << "----------" << std::endl; std::cout << "files: " << data.size() << std::endl; std::cout << "uncompressed (B): " << total_bytes << std::endl; const size_t chunk_size = 1 << 16; // build up input batch on CPU BatchDataCPU input_data_cpu(data, chunk_size); std::cout << "chunks: " << input_data_cpu.size() << std::endl; // compression // Get max output size per chunk nvcompError_t status; size_t max_out_bytes; status = nvcompBatchedGdeflateCompressGetMaxOutputChunkSize( chunk_size, &max_out_bytes); CHECK_NVCOMP_STATUS(status); // Allocate and prepare output/compressed batch BatchDataCPU compress_data_cpu(max_out_bytes, input_data_cpu.size()); #ifdef ENABLE_GDEFLATE // Compress on the CPU using gdeflate CPU batched API gdeflate::compressCPU( input_data_cpu.ptrs(), input_data_cpu.sizes(), chunk_size, input_data_cpu.size(), compress_data_cpu.ptrs(), compress_data_cpu.sizes()); #else throw std::runtime_error("nvcomp configured without gdeflate support. 
" "Please check the documentation for details on configuring nvcomp with gdeflate.") #endif // compute compression ratio size_t* compressed_sizes_host = compress_data_cpu.sizes(); size_t comp_bytes = 0; for (size_t i = 0; i < compress_data_cpu.size(); ++i) comp_bytes += compressed_sizes_host[i]; std::cout << "comp_size: " << comp_bytes << ", compressed ratio: " << std::fixed << std::setprecision(2) << (double)total_bytes / comp_bytes << std::endl; // Copy compressed data to GPU BatchData compress_data(compress_data_cpu, true); // Allocate and build up decompression batch on GPU BatchData decomp_data(input_data_cpu, false); // Create CUDA stream hipStream_t stream; hipStreamCreate(&stream); // CUDA events to measure decompression time hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); // gdeflate GPU decompression size_t decomp_temp_bytes; status = nvcompBatchedGdeflateDecompressGetTempSize( compress_data.size(), chunk_size, &decomp_temp_bytes); void* d_decomp_temp; CUDA_CHECK(hipMalloc(&d_decomp_temp, decomp_temp_bytes)); CUDA_CHECK(hipStreamSynchronize(stream)); // Run decompression status = nvcompBatchedGdeflateDecompressAsync( compress_data.ptrs(), compress_data.sizes(), decomp_data.sizes(), chunk_size, compress_data.size(), d_decomp_temp, decomp_temp_bytes, decomp_data.ptrs(), stream); // Validate decompressed data against input if (!(input_data_cpu == decomp_data)) throw std::runtime_error("Failed to validate decompressed data"); else std::cout << "decompression validated :)" << std::endl; // Re-run decompression to get throughput hipEventRecord(start, stream); status = nvcompBatchedGdeflateDecompressAsync( compress_data.ptrs(), compress_data.sizes(), decomp_data.sizes(), chunk_size, compress_data.size(), d_decomp_temp, decomp_temp_bytes, decomp_data.ptrs(), stream); hipEventRecord(end, stream); CUDA_CHECK(hipStreamSynchronize(stream)); CHECK_NVCOMP_STATUS(status); float ms; hipEventElapsedTime(&ms, start, end); double decompression_throughput = ((double)total_bytes / ms) * 1e-6; std::cout << "decompression throughput (GB/s): " << decompression_throughput << std::endl; hipFree(d_decomp_temp); hipEventDestroy(start); hipEventDestroy(end); hipStreamDestroy(stream); } #undef CHECK_NVCOMP_STATUS std::vector<char> readFile(const std::string& filename) { std::vector<char> buffer(4096); std::vector<char> host_data; std::ifstream fin(filename, std::ifstream::binary); fin.exceptions(std::ifstream::failbit | std::ifstream::badbit); size_t num; do { num = fin.readsome(buffer.data(), buffer.size()); host_data.insert(host_data.end(), buffer.begin(), buffer.begin() + num); } while (num > 0); return host_data; } std::vector<std::vector<char>> multi_file(const std::vector<std::string>& filenames) { std::vector<std::vector<char>> split_data; for (auto const& filename : filenames) { split_data.emplace_back(readFile(filename)); } return split_data; } int main(int argc, char* argv[]) { std::vector<std::string> file_names(argc - 1); if (argc == 1) { std::cerr << "Must specify at least one file." << std::endl; return 1; } // if `-f` is speficieid, assume single file mode if (strcmp(argv[1], "-f") == 0) { if (argc == 2) { std::cerr << "Missing file name following '-f'" << std::endl; return 1; } else if (argc > 3) { std::cerr << "Unknown extra arguments with '-f'." << std::endl; return 1; } file_names = {argv[2]}; } else { // multi-file mode for (int i = 1; i < argc; ++i) { file_names[i - 1] = argv[i]; } } auto data = multi_file(file_names); run_example(data); return 0; }
02ff9580a02c277687f72083e61041dba08769d5.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "BatchData.h" #include "gdeflate.h" #include "nvcomp/gdeflate.h" #ifdef ENABLE_GDEFLATE #define CHECK_NVCOMP_STATUS(status) \ if ((status) != nvcompSuccess) \ throw std::runtime_error("Failed to decompress data"); #else #define CHECK_NVCOMP_STATUS(status) \ if ((status) != nvcompSuccess) \ throw std::runtime_error("nvcomp not configured with gdeflate support"); #endif // Benchmark performance from the binary data file fname static void run_example(const std::vector<std::vector<char>>& data) { size_t total_bytes = 0; for (const std::vector<char>& part : data) { total_bytes += part.size(); } std::cout << "----------" << std::endl; std::cout << "files: " << data.size() << std::endl; std::cout << "uncompressed (B): " << total_bytes << std::endl; const size_t chunk_size = 1 << 16; // build up input batch on CPU BatchDataCPU input_data_cpu(data, chunk_size); std::cout << "chunks: " << input_data_cpu.size() << std::endl; // compression // Get max output size per chunk nvcompError_t status; size_t max_out_bytes; status = nvcompBatchedGdeflateCompressGetMaxOutputChunkSize( chunk_size, &max_out_bytes); CHECK_NVCOMP_STATUS(status); // Allocate and prepare output/compressed batch BatchDataCPU compress_data_cpu(max_out_bytes, input_data_cpu.size()); #ifdef ENABLE_GDEFLATE // Compress on the CPU using gdeflate CPU batched API gdeflate::compressCPU( input_data_cpu.ptrs(), input_data_cpu.sizes(), chunk_size, input_data_cpu.size(), compress_data_cpu.ptrs(), compress_data_cpu.sizes()); #else throw std::runtime_error("nvcomp configured without gdeflate support. 
" "Please check the documentation for details on configuring nvcomp with gdeflate.") #endif // compute compression ratio size_t* compressed_sizes_host = compress_data_cpu.sizes(); size_t comp_bytes = 0; for (size_t i = 0; i < compress_data_cpu.size(); ++i) comp_bytes += compressed_sizes_host[i]; std::cout << "comp_size: " << comp_bytes << ", compressed ratio: " << std::fixed << std::setprecision(2) << (double)total_bytes / comp_bytes << std::endl; // Copy compressed data to GPU BatchData compress_data(compress_data_cpu, true); // Allocate and build up decompression batch on GPU BatchData decomp_data(input_data_cpu, false); // Create CUDA stream cudaStream_t stream; cudaStreamCreate(&stream); // CUDA events to measure decompression time cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); // gdeflate GPU decompression size_t decomp_temp_bytes; status = nvcompBatchedGdeflateDecompressGetTempSize( compress_data.size(), chunk_size, &decomp_temp_bytes); void* d_decomp_temp; CUDA_CHECK(cudaMalloc(&d_decomp_temp, decomp_temp_bytes)); CUDA_CHECK(cudaStreamSynchronize(stream)); // Run decompression status = nvcompBatchedGdeflateDecompressAsync( compress_data.ptrs(), compress_data.sizes(), decomp_data.sizes(), chunk_size, compress_data.size(), d_decomp_temp, decomp_temp_bytes, decomp_data.ptrs(), stream); // Validate decompressed data against input if (!(input_data_cpu == decomp_data)) throw std::runtime_error("Failed to validate decompressed data"); else std::cout << "decompression validated :)" << std::endl; // Re-run decompression to get throughput cudaEventRecord(start, stream); status = nvcompBatchedGdeflateDecompressAsync( compress_data.ptrs(), compress_data.sizes(), decomp_data.sizes(), chunk_size, compress_data.size(), d_decomp_temp, decomp_temp_bytes, decomp_data.ptrs(), stream); cudaEventRecord(end, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); CHECK_NVCOMP_STATUS(status); float ms; cudaEventElapsedTime(&ms, start, end); double decompression_throughput = ((double)total_bytes / ms) * 1e-6; std::cout << "decompression throughput (GB/s): " << decompression_throughput << std::endl; cudaFree(d_decomp_temp); cudaEventDestroy(start); cudaEventDestroy(end); cudaStreamDestroy(stream); } #undef CHECK_NVCOMP_STATUS std::vector<char> readFile(const std::string& filename) { std::vector<char> buffer(4096); std::vector<char> host_data; std::ifstream fin(filename, std::ifstream::binary); fin.exceptions(std::ifstream::failbit | std::ifstream::badbit); size_t num; do { num = fin.readsome(buffer.data(), buffer.size()); host_data.insert(host_data.end(), buffer.begin(), buffer.begin() + num); } while (num > 0); return host_data; } std::vector<std::vector<char>> multi_file(const std::vector<std::string>& filenames) { std::vector<std::vector<char>> split_data; for (auto const& filename : filenames) { split_data.emplace_back(readFile(filename)); } return split_data; } int main(int argc, char* argv[]) { std::vector<std::string> file_names(argc - 1); if (argc == 1) { std::cerr << "Must specify at least one file." << std::endl; return 1; } // if `-f` is speficieid, assume single file mode if (strcmp(argv[1], "-f") == 0) { if (argc == 2) { std::cerr << "Missing file name following '-f'" << std::endl; return 1; } else if (argc > 3) { std::cerr << "Unknown extra arguments with '-f'." << std::endl; return 1; } file_names = {argv[2]}; } else { // multi-file mode for (int i = 1; i < argc; ++i) { file_names[i - 1] = argv[i]; } } auto data = multi_file(file_names); run_example(data); return 0; }
7293452316659e28cc38df06fcac336f63ea44c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2012 by Erik Opavsky * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "constants.h" __global__ void counterKernel (char * sequences, int sequenceLength, char * query, int queryLength, uint * count, double matchAccuracy) { // read query and sequence segment into shared memory for faster access extern __shared__ char shared[]; char * sharedQuery = &shared[0]; char * sharedSequence = &shared[queryLength]; // start of current sequence section int sequenceIndex = blockIdx.x * sequenceLength + blockIdx.y * blockDim.x + threadIdx.x; if (sequenceIndex < sequenceLength) *(sharedSequence + threadIdx.x) = *(sequences + sequenceIndex); if (threadIdx.x < queryLength) { *(sharedQuery + threadIdx.x) = query[threadIdx.x]; *(sharedSequence + blockDim.x + threadIdx.x) = *(sequences + sequenceIndex + blockDim.x); } int numMatches = 0; for (int i = 0; i < queryLength; i++) { if (*(sequences + sequenceIndex + i) == *(query + i)) numMatches++; } if (numMatches / (double) queryLength >= matchAccuracy) atomicInc (count + blockIdx.x * (sequenceLength - queryLength + 1) + blockIdx.y * blockDim.x + threadIdx.x, UINT_MAX); } // grep -c query fileName uint counter (char * d_sequences, int numSequences, int sequenceLength, char * query, int queryLength, double matchAccuracy) { // put query into device memory char * d_query; hipMalloc (&d_query, queryLength * sizeof (char)); hipMemcpy (d_query, query, queryLength * sizeof (char), hipMemcpyHostToDevice); int numBuckets = sequenceLength - queryLength + 1; int numThreads = numBuckets; if (numThreads > THREADS_PER_BLOCK) numThreads = THREADS_PER_BLOCK; int numSequenceSections = ceil ((numBuckets) / (float) numThreads); uint * d_tempCounters; hipMalloc (&d_tempCounters, numSequences * numBuckets * sizeof (int)); hipMemset (d_tempCounters, 0, sizeof (uint) * numSequences * numBuckets); dim3 gridDim (numSequences, numSequenceSections); hipLaunchKernelGGL(( counterKernel), dim3(gridDim), dim3(numThreads), (queryLength * 2 + numThreads) * sizeof (char), 0, d_sequences, sequenceLength, d_query, queryLength, d_tempCounters, matchAccuracy); uint count = 0; uint * tempCounters = (uint *) malloc (sizeof (uint) * numSequences * numBuckets); hipMemcpy (tempCounters, d_tempCounters, sizeof (uint) * numSequences * numBuckets, hipMemcpyDeviceToHost); for (int i = 0; i < numSequences; i++) for (int j = 0; j < numBuckets; j++) if (tempCounters[i * numBuckets + j]) { count++; break; } hipFree (d_tempCounters); hipFree (d_query); return count; }
7293452316659e28cc38df06fcac336f63ea44c2.cu
/* Copyright 2012 by Erik Opavsky * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "constants.h" __global__ void counterKernel (char * sequences, int sequenceLength, char * query, int queryLength, uint * count, double matchAccuracy) { // read query and sequence segment into shared memory for faster access extern __shared__ char shared[]; char * sharedQuery = &shared[0]; char * sharedSequence = &shared[queryLength]; // start of current sequence section int sequenceIndex = blockIdx.x * sequenceLength + blockIdx.y * blockDim.x + threadIdx.x; if (sequenceIndex < sequenceLength) *(sharedSequence + threadIdx.x) = *(sequences + sequenceIndex); if (threadIdx.x < queryLength) { *(sharedQuery + threadIdx.x) = query[threadIdx.x]; *(sharedSequence + blockDim.x + threadIdx.x) = *(sequences + sequenceIndex + blockDim.x); } int numMatches = 0; for (int i = 0; i < queryLength; i++) { if (*(sequences + sequenceIndex + i) == *(query + i)) numMatches++; } if (numMatches / (double) queryLength >= matchAccuracy) atomicInc (count + blockIdx.x * (sequenceLength - queryLength + 1) + blockIdx.y * blockDim.x + threadIdx.x, UINT_MAX); } // grep -c query fileName uint counter (char * d_sequences, int numSequences, int sequenceLength, char * query, int queryLength, double matchAccuracy) { // put query into device memory char * d_query; cudaMalloc (&d_query, queryLength * sizeof (char)); cudaMemcpy (d_query, query, queryLength * sizeof (char), cudaMemcpyHostToDevice); int numBuckets = sequenceLength - queryLength + 1; int numThreads = numBuckets; if (numThreads > THREADS_PER_BLOCK) numThreads = THREADS_PER_BLOCK; int numSequenceSections = ceil ((numBuckets) / (float) numThreads); uint * d_tempCounters; cudaMalloc (&d_tempCounters, numSequences * numBuckets * sizeof (int)); cudaMemset (d_tempCounters, 0, sizeof (uint) * numSequences * numBuckets); dim3 gridDim (numSequences, numSequenceSections); counterKernel<<<gridDim, numThreads, (queryLength * 2 + numThreads) * sizeof (char)>>> (d_sequences, sequenceLength, d_query, queryLength, d_tempCounters, matchAccuracy); uint count = 0; uint * tempCounters = (uint *) malloc (sizeof (uint) * numSequences * numBuckets); cudaMemcpy (tempCounters, d_tempCounters, sizeof (uint) * numSequences * numBuckets, cudaMemcpyDeviceToHost); for (int i = 0; i < numSequences; i++) for (int j = 0; j < numBuckets; j++) if (tempCounters[i * numBuckets + j]) { count++; break; } cudaFree (d_tempCounters); cudaFree (d_query); return count; }
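counterKernel sizes its shared memory at launch time, passing (queryLength * 2 + numThreads) * sizeof(char) as the third launch parameter, and then carves the single extern __shared__ buffer into a query region and a sequence region by pointer offset. The sketch below isolates that carving idiom with hypothetical names; it shows the general pattern rather than this project's kernel.

// Hedged sketch of the dynamic shared-memory carving idiom used by counterKernel.
// The launch supplies one byte count; the kernel partitions the buffer itself.
__global__ void carve_shared_sketch(const char *query, int query_len,
                                    const char *sequence, int seq_chunk_len)
{
    extern __shared__ char smem[];          // sized by the 3rd <<< >>> parameter
    char *s_query    = smem;                // first query_len bytes
    char *s_sequence = smem + query_len;    // the remaining bytes

    // Cooperative copies into shared memory, strided by the block size.
    for (int i = threadIdx.x; i < query_len; i += blockDim.x)
        s_query[i] = query[i];
    for (int i = threadIdx.x; i < seq_chunk_len; i += blockDim.x)
        s_sequence[i] = sequence[i];
    __syncthreads();                        // make the data visible to every thread

    // ... matching work against s_query / s_sequence would go here ...
}

// Launch sketch: dynamic shared bytes must cover both regions, e.g.
// carve_shared_sketch<<<grid, block, query_len + seq_chunk_len>>>(d_q, query_len, d_s, seq_chunk_len);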
440a0fafc4f4dcd8d545c42fd4dd5fc77d5f0fae.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <torch/extension.h> #include <iostream> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <math.h> #include <algorithm> #include <stdlib.h> #include "cpu/vision.h" /*rle cuda kernels are cuda version of the corresponding cpu functions here https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c these are only a subset of rle kernels.*/ typedef unsigned int uint; typedef unsigned long siz; typedef unsigned char byte; //6144 is based on minimum shared memory size per SM //across all pytorch-supported GPUs. Need to use blocking //to avoid this restriction const int BUFFER_SIZE=6144; const int CNTS_SIZE=6144; __global__ void crop_and_scale_cuda_kernel(double *dense_poly_data, int *per_anchor_poly_idx, int *poly_rel_idx, int poly_count, int anchor_count, float4 *anchor_data, int mask_size){ int tid = threadIdx.x; int block_jump = blockDim.x; int poly_id = blockIdx.x; int anchor_idx; for (anchor_idx = 0; anchor_idx < anchor_count; anchor_idx++){ if (poly_id < per_anchor_poly_idx[anchor_idx + 1]) break; } float w = anchor_data[anchor_idx].z - anchor_data[anchor_idx].x; float h = anchor_data[anchor_idx].w - anchor_data[anchor_idx].y; w = fmaxf(w, 1.0f); h = fmaxf(h, 1.0f); float ratio_h = ((float) mask_size) / h; float ratio_w = ((float) mask_size) / w; int poly_ptr_idx_start = poly_rel_idx[poly_id]; int poly_ptr_idx_end = poly_rel_idx[poly_id + 1]; double *poly_data_buf = dense_poly_data + poly_ptr_idx_start; int len = poly_ptr_idx_end - poly_ptr_idx_start; for (int j = tid; j < len; j += block_jump){ if (j % 2 == 0) poly_data_buf[j] = ratio_w*((float) poly_data_buf[j]- anchor_data[anchor_idx].x); if (j % 2 == 1) poly_data_buf[j] = ratio_h*((float) poly_data_buf[j]- anchor_data[anchor_idx].y); } } //merging masks happens on mask format, not RLE format. 
__global__ void merge_masks_cuda_kernel(byte *masks_in, float *masks_out, const int mask_size, int *per_anchor_poly_idx, int anchor_count){ int anchor_idx = blockIdx.x; int tid = threadIdx.x; int jump_block = blockDim.x; int mask_start_idx = per_anchor_poly_idx[anchor_idx]; int num_of_masks_to_merge = per_anchor_poly_idx[anchor_idx + 1]-per_anchor_poly_idx[anchor_idx]; for(int j = tid; j < mask_size * mask_size; j += jump_block){ int transposed_pixel = (j % mask_size) * mask_size + j / mask_size; byte pixel = 0; for(int k = 0; k < num_of_masks_to_merge; k++){ if (masks_in[(mask_start_idx + k) * mask_size * mask_size + j] == 1) pixel = 1; if (pixel == 1) break; } masks_out[anchor_idx * mask_size * mask_size + transposed_pixel] = (float) pixel; } } /*cuda version of rleDecode function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void decode_rle_cuda_kernel(const int *num_of_cnts, uint *cnts, long h, long w, byte *mask) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; int m = num_of_cnts[poly_id]; uint *cnts_buf = cnts + CNTS_SIZE * poly_id; byte *mask_ptr = mask + poly_id * h * w; __shared__ uint shbuf1[CNTS_SIZE]; __shared__ uint shbuf2[CNTS_SIZE]; //initialize shbuf for scan. first element is 0 (exclusive scan) for (long i = tid; i < CNTS_SIZE; i += block_jump){ shbuf1[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; shbuf2[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; } __syncthreads(); //double buffering for scan int switch_buf = 0; for (int offset = 1; offset <= m; offset *= 2){ switch_buf = 1 - switch_buf; if(switch_buf == 0){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf2[j] = shbuf1[j]+shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } }else if (switch_buf == 1){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } uint *scanned_buf = switch_buf == 0 ? shbuf2 : shbuf1; //find which bin pixel j falls into , which determines the pixel value //use binary search for(int j = tid; j < h * w; j += block_jump){ int bin = 0; int min_idx = 0; int max_idx = m; int mid_idx = m / 2; while(max_idx > min_idx){ if(j > scanned_buf[mid_idx]) { min_idx = mid_idx+1; mid_idx = (min_idx + max_idx) / 2; } else if (j < scanned_buf[mid_idx]) { max_idx = mid_idx; mid_idx = (min_idx + max_idx) / 2; } else { mid_idx++; break; } } int k = mid_idx; byte pixel = k % 2 == 0 ? 
1 : 0; mask_ptr[j] = pixel; } } /*cuda version of rleFrPoly function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void rle_fr_poly_cuda_kernel(const double *dense_coordinates, int *poly_rel_idx, long h, long w, uint *cnts, int *x_in, int *y_in, int *u_in, int *v_in, uint *a_in, uint *b_in, int *num_of_cnts) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; long cnts_offset = poly_id * CNTS_SIZE; long k = (poly_rel_idx[poly_id + 1] - poly_rel_idx[poly_id]) / 2; const double *xy = dense_coordinates + poly_rel_idx[poly_id]; int *x = x_in + poly_id * BUFFER_SIZE; int *y = y_in + poly_id * BUFFER_SIZE; int *u = u_in + poly_id * BUFFER_SIZE; int *v = v_in + poly_id * BUFFER_SIZE; uint *a = a_in + poly_id * BUFFER_SIZE; uint *b = b_in + poly_id * BUFFER_SIZE; /* upsample and get discrete points densely along entire boundary */ long j, m = 0; double scale = 5; __shared__ int shbuf1[BUFFER_SIZE]; __shared__ int shbuf2[BUFFER_SIZE]; for(long j = tid; j < BUFFER_SIZE; j += block_jump) { shbuf1[j] = 0; shbuf2[j] = 0; } for(long j = tid; j <= k; j += block_jump) x[j] = j < k ? ((int) (scale * xy[2 * j + 0] + 0.5)) : ((int) (scale * xy[0] + 0.5)); for(long j = tid; j <= k; j += block_jump) y[j] = j < k ? ((int) (scale * xy[2 * j + 1] + 0.5)) : ((int) (scale * xy[1] + 0.5)); __syncthreads(); for(int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = abs(xe - xs); dy = abs(ys - ye); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; dist = dx >= dy ? dx + 1 : dy + 1; shbuf1[j + 1] = dist; shbuf2[j + 1] = dist; } __syncthreads(); //block-wide exclusive prefix scan int switch_buf = 0; for (int offset = 1; offset <= k; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j] + shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = __sad(xe, xs, 0); dy = __sad(ys, ye, 0); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; m = switch_buf == 0 ? shbuf2[j] : shbuf1[j]; if (dx >= dy) for (d = 0; d <= dx; d++) { /*the multiplication statement 's*t' causes nvcc to optimize with flush-to-zero=True for double precision multiply, which we observe produces different results than CPU occasionally. To force flush-to-zero=False, we use __dmul_rn intrinsics function */ t = flip ? dx - d : d; u[m] = t + xs; v[m] = (int) (ys + __dmul_rn(s, t) + .5); m++; } else for (d = 0; d <= dy; d++) { t = flip ? dy - d : d; v[m] = t + ys; u[m] = (int) (xs + __dmul_rn(s, t) + .5); m++; } } __syncthreads(); m = switch_buf == 0 ? shbuf2[k] : shbuf1[k]; int k2 = m; __syncthreads(); double xd, yd; if (tid == 0) { shbuf1[tid] = 0; shbuf2[tid] = 0; } /* get points along y-boundary and downsample */ for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if (u[j] != u[j - 1]){ xd = (double) (u[j] < u[j-1] ? 
u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1 ) { shbuf1[j] = 0; shbuf2[j] = 0; continue; } yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); shbuf1[j] = 1; shbuf2[j] = 1; } else { shbuf1[j] = 0; shbuf2[j] = 0; } } } __syncthreads(); //exclusive prefix scan switch_buf = 0; for (int offset = 1; offset < k2; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j - offset] + shbuf1[j]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j - offset] + shbuf2[j]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if(u[j] != u[j - 1]){ xd = (double) (u[j] < u[j - 1] ? u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1) {continue;} yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); m = switch_buf == 0 ? shbuf2[j - 1]:shbuf1[j - 1]; x[m] = (int) xd; y[m] = (int) yd; m++; } } } __syncthreads(); /* compute rle encoding given y-boundary points */ m = switch_buf == 0 ? shbuf2[k2 - 1] : shbuf1[k2 - 1]; int k3 = m; for (int j = tid; j <= k3; j += block_jump){ if (j < k3) a[j] = (uint) (x[j] * (int) (h) + y[j]); else a[j] = (uint)(h * w); } k3++; __syncthreads(); //run brick sort on a for k3+1 element //load k3+1 elements of a into shared memory for(long j = tid; j < k3; j += block_jump) shbuf1[j]=a[j]; __syncthreads(); uint a_temp; for (int r = 0; r <= k3 / 2; r++){ int evenCas = k3 / 2; int oddCas = (k3 - 1) / 2; //start with 0, need (k3+1)/2 CAS for (int j = tid; j < evenCas; j += block_jump){ if (shbuf1[2 * j] > shbuf1[2 * j + 1]){ a_temp = shbuf1[2 * j]; shbuf1[2 * j]=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = a_temp; } } __syncthreads(); //start with 1 for (int j = tid; j < oddCas; j += block_jump){ if (shbuf1[2 * j + 1] > shbuf1[2 * j + 2]){ a_temp=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = shbuf1[2 * j + 2]; shbuf1[2 * j + 2]=a_temp; } } __syncthreads(); } for(long j = tid; j < k3; j += block_jump) { if(j>0) shbuf2[j] = shbuf1[j - 1]; else shbuf2[j] = 0; } __syncthreads(); for(int j = tid; j < k3; j += block_jump){ shbuf1[j] -= shbuf2[j]; } __syncthreads(); uint *cnts_buf = cnts + cnts_offset; if (tid == 0){ j = m = 0; cnts_buf[m++] = shbuf1[j++]; while (j < k3) if (shbuf1[j] > 0) cnts_buf[m++] = shbuf1[j++]; else { j++; if (j < k3) cnts_buf[m - 1] += shbuf1[j++]; } num_of_cnts[poly_id] = m; } __syncthreads(); } at::Tensor generate_mask_targets_cuda(at::Tensor dense_vector, const std::vector<std::vector<at::Tensor>> polygons, const at::Tensor anchors, const int mask_size){ const int M = mask_size; assert (M < 32); //if M >=32, shared memory buffer size may not be //sufficient. 
Need to fix this by blocking float *d_anchor_data = anchors.data_ptr<float>(); int num_of_anchors = anchors.size(0); auto per_anchor_poly_idx = at::empty({num_of_anchors + 1}, at::CPU(at::kInt)); int num_of_poly = 0; for (int i = 0; i < num_of_anchors; i++){ *(per_anchor_poly_idx.data_ptr<int>() + i) = num_of_poly; num_of_poly += polygons[i].size(); } *(per_anchor_poly_idx.data_ptr<int>() + num_of_anchors) = num_of_poly; auto poly_rel_idx = at::empty({num_of_poly + 1}, at::CPU(at::kInt)); double *dense_poly_data = dense_vector.data_ptr<double>(); int start_idx = 0; int poly_count = 0; for(int i = 0; i < polygons.size(); i++){ for(int j=0; j < polygons[i].size(); j++) { *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; start_idx += polygons[i][j].size(0); poly_count++; } } *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; at::Tensor d_x_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_y_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_u_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_v_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_a_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));//used with uint* pointer at::Tensor d_b_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); //used with uint* pointer at::Tensor d_mask_t = torch::empty({M * M * num_of_poly}, torch::CUDA(at::kByte)); auto result = torch::empty({num_of_anchors, M, M}, torch::CUDA(at::kFloat)); at::Tensor d_num_of_counts_t = torch::empty({num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_cnts_t = torch::empty({CNTS_SIZE * num_of_poly}, torch::CUDA(at::kInt)); auto d_dense_vector = dense_vector.cuda(); auto d_per_anchor_poly_idx = per_anchor_poly_idx.cuda(); auto d_poly_rel_idx = poly_rel_idx.cuda(); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( crop_and_scale_cuda_kernel), dim3(num_of_poly), dim3(256), 0, stream.stream(), d_dense_vector.data_ptr<double>(), d_per_anchor_poly_idx.data_ptr<int>(), d_poly_rel_idx.data_ptr<int>(), poly_count, num_of_anchors, (float4*) d_anchor_data, M); //TODO: larger threads-per-block might be better here, because each CTA uses 32 KB of shmem, //and occupancy is likely shmem capacity bound hipLaunchKernelGGL(( rle_fr_poly_cuda_kernel), dim3(num_of_poly), dim3(1024), 0, stream.stream(), d_dense_vector.data_ptr<double>(), d_poly_rel_idx.data_ptr<int>(), M, M, (uint*) d_cnts_t.data_ptr<int>(), d_x_t.data_ptr<int>(), d_y_t.data_ptr<int>(), d_u_t.data_ptr<int>(), d_v_t.data_ptr<int>(), (uint*) d_a_t.data_ptr<int>(), (uint*) d_b_t.data_ptr<int>(), d_num_of_counts_t.data_ptr<int>()); hipLaunchKernelGGL(( decode_rle_cuda_kernel), dim3(num_of_poly), dim3(256), 0, stream.stream(), d_num_of_counts_t.data_ptr<int>(), (uint*) d_cnts_t.data_ptr<int>(), M, M, d_mask_t.data_ptr<byte>()); hipLaunchKernelGGL(( merge_masks_cuda_kernel), dim3(num_of_anchors), dim3(256), 0, stream.stream(), d_mask_t.data<byte>(), result.data_ptr<float>(), M, d_per_anchor_poly_idx.data_ptr<int>(), num_of_anchors); return result; }
440a0fafc4f4dcd8d545c42fd4dd5fc77d5f0fae.cu
/** * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <torch/extension.h> #include <iostream> #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <math.h> #include <algorithm> #include <stdlib.h> #include "cpu/vision.h" /*rle cuda kernels are cuda version of the corresponding cpu functions here https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c these are only a subset of rle kernels.*/ typedef unsigned int uint; typedef unsigned long siz; typedef unsigned char byte; //6144 is based on minimum shared memory size per SM //across all pytorch-supported GPUs. Need to use blocking //to avoid this restriction const int BUFFER_SIZE=6144; const int CNTS_SIZE=6144; __global__ void crop_and_scale_cuda_kernel(double *dense_poly_data, int *per_anchor_poly_idx, int *poly_rel_idx, int poly_count, int anchor_count, float4 *anchor_data, int mask_size){ int tid = threadIdx.x; int block_jump = blockDim.x; int poly_id = blockIdx.x; int anchor_idx; for (anchor_idx = 0; anchor_idx < anchor_count; anchor_idx++){ if (poly_id < per_anchor_poly_idx[anchor_idx + 1]) break; } float w = anchor_data[anchor_idx].z - anchor_data[anchor_idx].x; float h = anchor_data[anchor_idx].w - anchor_data[anchor_idx].y; w = fmaxf(w, 1.0f); h = fmaxf(h, 1.0f); float ratio_h = ((float) mask_size) / h; float ratio_w = ((float) mask_size) / w; int poly_ptr_idx_start = poly_rel_idx[poly_id]; int poly_ptr_idx_end = poly_rel_idx[poly_id + 1]; double *poly_data_buf = dense_poly_data + poly_ptr_idx_start; int len = poly_ptr_idx_end - poly_ptr_idx_start; for (int j = tid; j < len; j += block_jump){ if (j % 2 == 0) poly_data_buf[j] = ratio_w*((float) poly_data_buf[j]- anchor_data[anchor_idx].x); if (j % 2 == 1) poly_data_buf[j] = ratio_h*((float) poly_data_buf[j]- anchor_data[anchor_idx].y); } } //merging masks happens on mask format, not RLE format. 
__global__ void merge_masks_cuda_kernel(byte *masks_in, float *masks_out, const int mask_size, int *per_anchor_poly_idx, int anchor_count){ int anchor_idx = blockIdx.x; int tid = threadIdx.x; int jump_block = blockDim.x; int mask_start_idx = per_anchor_poly_idx[anchor_idx]; int num_of_masks_to_merge = per_anchor_poly_idx[anchor_idx + 1]-per_anchor_poly_idx[anchor_idx]; for(int j = tid; j < mask_size * mask_size; j += jump_block){ int transposed_pixel = (j % mask_size) * mask_size + j / mask_size; byte pixel = 0; for(int k = 0; k < num_of_masks_to_merge; k++){ if (masks_in[(mask_start_idx + k) * mask_size * mask_size + j] == 1) pixel = 1; if (pixel == 1) break; } masks_out[anchor_idx * mask_size * mask_size + transposed_pixel] = (float) pixel; } } /*cuda version of rleDecode function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void decode_rle_cuda_kernel(const int *num_of_cnts, uint *cnts, long h, long w, byte *mask) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; int m = num_of_cnts[poly_id]; uint *cnts_buf = cnts + CNTS_SIZE * poly_id; byte *mask_ptr = mask + poly_id * h * w; __shared__ uint shbuf1[CNTS_SIZE]; __shared__ uint shbuf2[CNTS_SIZE]; //initialize shbuf for scan. first element is 0 (exclusive scan) for (long i = tid; i < CNTS_SIZE; i += block_jump){ shbuf1[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; shbuf2[i] = (i <= m & i > 0) ? cnts_buf[i - 1]:0; } __syncthreads(); //double buffering for scan int switch_buf = 0; for (int offset = 1; offset <= m; offset *= 2){ switch_buf = 1 - switch_buf; if(switch_buf == 0){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf2[j] = shbuf1[j]+shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } }else if (switch_buf == 1){ for(int j = tid;j <= m;j += block_jump){ if(j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } uint *scanned_buf = switch_buf == 0 ? shbuf2 : shbuf1; //find which bin pixel j falls into , which determines the pixel value //use binary search for(int j = tid; j < h * w; j += block_jump){ int bin = 0; int min_idx = 0; int max_idx = m; int mid_idx = m / 2; while(max_idx > min_idx){ if(j > scanned_buf[mid_idx]) { min_idx = mid_idx+1; mid_idx = (min_idx + max_idx) / 2; } else if (j < scanned_buf[mid_idx]) { max_idx = mid_idx; mid_idx = (min_idx + max_idx) / 2; } else { mid_idx++; break; } } int k = mid_idx; byte pixel = k % 2 == 0 ? 
1 : 0; mask_ptr[j] = pixel; } } /*cuda version of rleFrPoly function in this API: https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/ __global__ void rle_fr_poly_cuda_kernel(const double *dense_coordinates, int *poly_rel_idx, long h, long w, uint *cnts, int *x_in, int *y_in, int *u_in, int *v_in, uint *a_in, uint *b_in, int *num_of_cnts) { int poly_id = blockIdx.x; int tid = threadIdx.x; int block_jump = blockDim.x; long cnts_offset = poly_id * CNTS_SIZE; long k = (poly_rel_idx[poly_id + 1] - poly_rel_idx[poly_id]) / 2; const double *xy = dense_coordinates + poly_rel_idx[poly_id]; int *x = x_in + poly_id * BUFFER_SIZE; int *y = y_in + poly_id * BUFFER_SIZE; int *u = u_in + poly_id * BUFFER_SIZE; int *v = v_in + poly_id * BUFFER_SIZE; uint *a = a_in + poly_id * BUFFER_SIZE; uint *b = b_in + poly_id * BUFFER_SIZE; /* upsample and get discrete points densely along entire boundary */ long j, m = 0; double scale = 5; __shared__ int shbuf1[BUFFER_SIZE]; __shared__ int shbuf2[BUFFER_SIZE]; for(long j = tid; j < BUFFER_SIZE; j += block_jump) { shbuf1[j] = 0; shbuf2[j] = 0; } for(long j = tid; j <= k; j += block_jump) x[j] = j < k ? ((int) (scale * xy[2 * j + 0] + 0.5)) : ((int) (scale * xy[0] + 0.5)); for(long j = tid; j <= k; j += block_jump) y[j] = j < k ? ((int) (scale * xy[2 * j + 1] + 0.5)) : ((int) (scale * xy[1] + 0.5)); __syncthreads(); for(int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = abs(xe - xs); dy = abs(ys - ye); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; dist = dx >= dy ? dx + 1 : dy + 1; shbuf1[j + 1] = dist; shbuf2[j + 1] = dist; } __syncthreads(); //block-wide exclusive prefix scan int switch_buf = 0; for (int offset = 1; offset <= k; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j] + shbuf1[j - offset]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for(int j = tid; j <= k; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k; j += block_jump){ int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist; int flip; double s; dx = __sad(xe, xs, 0); dy = __sad(ys, ye, 0); flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye); if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;} s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy; m = switch_buf == 0 ? shbuf2[j] : shbuf1[j]; if (dx >= dy) for (d = 0; d <= dx; d++) { /*the multiplication statement 's*t' causes nvcc to optimize with flush-to-zero=True for double precision multiply, which we observe produces different results than CPU occasionally. To force flush-to-zero=False, we use __dmul_rn intrinsics function */ t = flip ? dx - d : d; u[m] = t + xs; v[m] = (int) (ys + __dmul_rn(s, t) + .5); m++; } else for (d = 0; d <= dy; d++) { t = flip ? dy - d : d; v[m] = t + ys; u[m] = (int) (xs + __dmul_rn(s, t) + .5); m++; } } __syncthreads(); m = switch_buf == 0 ? shbuf2[k] : shbuf1[k]; int k2 = m; __syncthreads(); double xd, yd; if (tid == 0) { shbuf1[tid] = 0; shbuf2[tid] = 0; } /* get points along y-boundary and downsample */ for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if (u[j] != u[j - 1]){ xd = (double) (u[j] < u[j-1] ? 
u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1 ) { shbuf1[j] = 0; shbuf2[j] = 0; continue; } yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); shbuf1[j] = 1; shbuf2[j] = 1; } else { shbuf1[j] = 0; shbuf2[j] = 0; } } } __syncthreads(); //exclusive prefix scan switch_buf = 0; for (int offset = 1; offset < k2; offset *= 2){ switch_buf = 1 - switch_buf; if (switch_buf == 0){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf2[j] = shbuf1[j - offset] + shbuf1[j]; else shbuf2[j] = shbuf1[j]; } } else if (switch_buf == 1){ for (int j = tid; j < k2; j += block_jump){ if (j >= offset) shbuf1[j] = shbuf2[j - offset] + shbuf2[j]; else shbuf1[j] = shbuf2[j]; } } __syncthreads(); } for (int j = tid; j < k2; j += block_jump){ if (j > 0){ if(u[j] != u[j - 1]){ xd = (double) (u[j] < u[j - 1] ? u[j] : u[j] - 1); xd = (xd + .5) / scale - .5; if (floor(xd) != xd || xd < 0 || xd > w - 1) {continue;} yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5; if (yd < 0) yd = 0; else if (yd > h) yd = h; yd = ceil(yd); m = switch_buf == 0 ? shbuf2[j - 1]:shbuf1[j - 1]; x[m] = (int) xd; y[m] = (int) yd; m++; } } } __syncthreads(); /* compute rle encoding given y-boundary points */ m = switch_buf == 0 ? shbuf2[k2 - 1] : shbuf1[k2 - 1]; int k3 = m; for (int j = tid; j <= k3; j += block_jump){ if (j < k3) a[j] = (uint) (x[j] * (int) (h) + y[j]); else a[j] = (uint)(h * w); } k3++; __syncthreads(); //run brick sort on a for k3+1 element //load k3+1 elements of a into shared memory for(long j = tid; j < k3; j += block_jump) shbuf1[j]=a[j]; __syncthreads(); uint a_temp; for (int r = 0; r <= k3 / 2; r++){ int evenCas = k3 / 2; int oddCas = (k3 - 1) / 2; //start with 0, need (k3+1)/2 CAS for (int j = tid; j < evenCas; j += block_jump){ if (shbuf1[2 * j] > shbuf1[2 * j + 1]){ a_temp = shbuf1[2 * j]; shbuf1[2 * j]=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = a_temp; } } __syncthreads(); //start with 1 for (int j = tid; j < oddCas; j += block_jump){ if (shbuf1[2 * j + 1] > shbuf1[2 * j + 2]){ a_temp=shbuf1[2 * j + 1]; shbuf1[2 * j + 1] = shbuf1[2 * j + 2]; shbuf1[2 * j + 2]=a_temp; } } __syncthreads(); } for(long j = tid; j < k3; j += block_jump) { if(j>0) shbuf2[j] = shbuf1[j - 1]; else shbuf2[j] = 0; } __syncthreads(); for(int j = tid; j < k3; j += block_jump){ shbuf1[j] -= shbuf2[j]; } __syncthreads(); uint *cnts_buf = cnts + cnts_offset; if (tid == 0){ j = m = 0; cnts_buf[m++] = shbuf1[j++]; while (j < k3) if (shbuf1[j] > 0) cnts_buf[m++] = shbuf1[j++]; else { j++; if (j < k3) cnts_buf[m - 1] += shbuf1[j++]; } num_of_cnts[poly_id] = m; } __syncthreads(); } at::Tensor generate_mask_targets_cuda(at::Tensor dense_vector, const std::vector<std::vector<at::Tensor>> polygons, const at::Tensor anchors, const int mask_size){ const int M = mask_size; assert (M < 32); //if M >=32, shared memory buffer size may not be //sufficient. 
Need to fix this by blocking float *d_anchor_data = anchors.data_ptr<float>(); int num_of_anchors = anchors.size(0); auto per_anchor_poly_idx = at::empty({num_of_anchors + 1}, at::CPU(at::kInt)); int num_of_poly = 0; for (int i = 0; i < num_of_anchors; i++){ *(per_anchor_poly_idx.data_ptr<int>() + i) = num_of_poly; num_of_poly += polygons[i].size(); } *(per_anchor_poly_idx.data_ptr<int>() + num_of_anchors) = num_of_poly; auto poly_rel_idx = at::empty({num_of_poly + 1}, at::CPU(at::kInt)); double *dense_poly_data = dense_vector.data_ptr<double>(); int start_idx = 0; int poly_count = 0; for(int i = 0; i < polygons.size(); i++){ for(int j=0; j < polygons[i].size(); j++) { *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; start_idx += polygons[i][j].size(0); poly_count++; } } *(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx; at::Tensor d_x_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_y_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_u_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_v_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_a_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));//used with uint* pointer at::Tensor d_b_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); //used with uint* pointer at::Tensor d_mask_t = torch::empty({M * M * num_of_poly}, torch::CUDA(at::kByte)); auto result = torch::empty({num_of_anchors, M, M}, torch::CUDA(at::kFloat)); at::Tensor d_num_of_counts_t = torch::empty({num_of_poly}, torch::CUDA(at::kInt)); at::Tensor d_cnts_t = torch::empty({CNTS_SIZE * num_of_poly}, torch::CUDA(at::kInt)); auto d_dense_vector = dense_vector.cuda(); auto d_per_anchor_poly_idx = per_anchor_poly_idx.cuda(); auto d_poly_rel_idx = poly_rel_idx.cuda(); auto stream = at::cuda::getCurrentCUDAStream(); crop_and_scale_cuda_kernel<<<num_of_poly, 256, 0, stream.stream()>>>(d_dense_vector.data_ptr<double>(), d_per_anchor_poly_idx.data_ptr<int>(), d_poly_rel_idx.data_ptr<int>(), poly_count, num_of_anchors, (float4*) d_anchor_data, M); //TODO: larger threads-per-block might be better here, because each CTA uses 32 KB of shmem, //and occupancy is likely shmem capacity bound rle_fr_poly_cuda_kernel<<<num_of_poly, 1024, 0, stream.stream()>>>(d_dense_vector.data_ptr<double>(), d_poly_rel_idx.data_ptr<int>(), M, M, (uint*) d_cnts_t.data_ptr<int>(), d_x_t.data_ptr<int>(), d_y_t.data_ptr<int>(), d_u_t.data_ptr<int>(), d_v_t.data_ptr<int>(), (uint*) d_a_t.data_ptr<int>(), (uint*) d_b_t.data_ptr<int>(), d_num_of_counts_t.data_ptr<int>()); decode_rle_cuda_kernel<<<num_of_poly, 256, 0, stream.stream()>>>(d_num_of_counts_t.data_ptr<int>(), (uint*) d_cnts_t.data_ptr<int>(), M, M, d_mask_t.data_ptr<byte>()); merge_masks_cuda_kernel<<<num_of_anchors, 256, 0, stream.stream()>>>(d_mask_t.data<byte>(), result.data_ptr<float>(), M, d_per_anchor_poly_idx.data_ptr<int>(), num_of_anchors); return result; }
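// ----------------------------------------------------------------------------
// [Editorial sketch, not part of the original file] decode_rle_cuda_kernel above
// parallelizes the sequential rleDecode routine from the cocoapi repository cited
// in its comment: the RLE counts describe alternating runs that start with a run
// of 0s and flip value after every run. A minimal host-side reference of that
// rule, handy for spot-checking the GPU output on small masks; the name
// rle_decode_cpu_ref and the use of unsigned char instead of byte are assumptions.
static void rle_decode_cpu_ref(const unsigned int *cnts, int m,
                               unsigned char *mask, long h, long w)
{
    long p = 0;
    unsigned char v = 0;                      // first run encodes background (0)
    for (int k = 0; k < m; k++) {
        for (unsigned int r = 0; r < cnts[k] && p < h * w; r++) mask[p++] = v;
        v = 1 - v;                            // alternate 0 / 1 after each run
    }
}
// ----------------------------------------------------------------------------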
28499906445505eaed34b64c42eccdd6f86e3ede.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nonseparable.h" #include "common.h" #ifdef SEPARATE_COMPILATION // Required for separate compilation (see Makefile) #ifndef CONSTMEM_FILTERS_NS #define CONSTMEM_FILTERS_NS __constant__ DTYPE c_kern_LL[MAX_FILTER_WIDTH * MAX_FILTER_WIDTH]; __constant__ DTYPE c_kern_LH[MAX_FILTER_WIDTH * MAX_FILTER_WIDTH]; __constant__ DTYPE c_kern_HL[MAX_FILTER_WIDTH * MAX_FILTER_WIDTH]; __constant__ DTYPE c_kern_HH[MAX_FILTER_WIDTH * MAX_FILTER_WIDTH]; #endif #endif // outer product of arrays "a", "b" of length "len" DTYPE* w_outer(DTYPE* a, DTYPE* b, int len) { DTYPE* res = (DTYPE*) calloc(len*len, sizeof(DTYPE)); for (int i = 0; i < len; i++) { for (int j = 0; j < len; j++) { res[i*len+j] = a[i]*b[j]; } } return res; } /// Compute the four filters A, H, V, D from a family name. /// These filters are separable, i.e computed from 1D filters. /// wname: name of the filter ("haar", "db3", "sym4", ...) /// direction: 1 for forward transform, -1 for inverse transform /// Returns : the filter width "hlen" if success ; a negative value otherwise. int w_compute_filters(const char* wname, int direction, int do_swt) { if (direction == 0) { puts("ERROR: w_compute_filters(): please specify a direction for second argument : +1 for forward, -1 for inverse)"); return -1; } int hlen = 0; DTYPE* f1_l; // 1D lowpass DTYPE* f1_h; // 1D highpass DTYPE* f2_a, *f2_h, *f2_v, *f2_d; // 2D filters // Haar filters has specific kernels if (!do_swt) { if ((!strcasecmp(wname, "haar")) || (!strcasecmp(wname, "db1")) || (!strcasecmp(wname, "bior1.1")) || (!strcasecmp(wname, "rbior1.1"))) { return 2; } } // Browse available filters (see filters.h) int i; for (i = 0; i < 72; i++) { if (!strcasecmp(wname, all_filters[i].wname)) { hlen = all_filters[i].hlen; if (direction > 0) { f1_l = all_filters[i].f_l; f1_h = all_filters[i].f_h; } else { f1_l = all_filters[i].i_l; f1_h = all_filters[i].i_h; } break; } } if (hlen == 0) { printf("ERROR: w_compute_filters(): unknown filter %s\n", wname); return -2; } // Create the separable 2D filters f2_a = w_outer(f1_l, f1_l, hlen); f2_h = w_outer(f1_l, f1_h, hlen); // CHECKME f2_v = w_outer(f1_h, f1_l, hlen); f2_d = w_outer(f1_h, f1_h, hlen); // Copy the filters to device constant memory hipMemcpyToSymbol(c_kern_LL, f2_a, hlen*hlen*sizeof(DTYPE), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_kern_LH, f2_h, hlen*hlen*sizeof(DTYPE), 0, hipMemcpyHostToDevice); // CHECKME hipMemcpyToSymbol(c_kern_HL, f2_v, hlen*hlen*sizeof(DTYPE), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(c_kern_HH, f2_d, hlen*hlen*sizeof(DTYPE), 0, hipMemcpyHostToDevice); return hlen; } int w_set_filters_forward_nonseparable(DTYPE* filter1, DTYPE* filter2, DTYPE* filter3, DTYPE* filter4, uint len) { if (hipMemcpyToSymbol(c_kern_LL, filter1, len*len*sizeof(DTYPE), 0, hipMemcpyHostToDevice) != hipSuccess || hipMemcpyToSymbol(c_kern_LH, filter2, len*len*sizeof(DTYPE), 0, hipMemcpyHostToDevice) != hipSuccess || hipMemcpyToSymbol(c_kern_HL, filter3, len*len*sizeof(DTYPE), 0, hipMemcpyHostToDevice) != hipSuccess || hipMemcpyToSymbol(c_kern_HH, filter4, len*len*sizeof(DTYPE), 0, hipMemcpyHostToDevice) != hipSuccess) { return -3; } return 0; } int w_set_filters_inverse_nonseparable(DTYPE* filter1, DTYPE* filter2, DTYPE* filter3, DTYPE* filter4, uint len) { if (hipMemcpyToSymbol(c_kern_LL, filter1, len*len*sizeof(DTYPE), 0, hipMemcpyHostToDevice) != hipSuccess || hipMemcpyToSymbol(c_kern_LH, filter2, len*len*sizeof(DTYPE), 0, hipMemcpyHostToDevice) 
!= hipSuccess || hipMemcpyToSymbol(c_kern_HL, filter3, len*len*sizeof(DTYPE), 0, hipMemcpyHostToDevice) != hipSuccess || hipMemcpyToSymbol(c_kern_HH, filter4, len*len*sizeof(DTYPE), 0, hipMemcpyHostToDevice) != hipSuccess) { return -3; } return 0; } // must be run with grid size = (Nc/2, Nr/2) where Nr = numrows of input image __global__ void w_kern_forward(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen) { int gidx = threadIdx.x + blockIdx.x*blockDim.x; int gidy = threadIdx.y + blockIdx.y*blockDim.y; int Nr_is_odd = (Nr & 1); int Nr2 = (Nr + Nr_is_odd)/2; int Nc_is_odd = (Nc & 1); int Nc2 = (Nc + Nc_is_odd)/2; if (gidy < Nr2 && gidx < Nc2) { int c, hL, hR; if (hlen & 1) { // odd kernel size c = hlen/2; hL = c; hR = c; } else { // even kernel size : center is shifted to the left c = hlen/2 - 1; hL = c; hR = c+1; } DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0; DTYPE img_val; // Convolution with periodic boundaries extension. // The following can be sped-up by splitting into 3*3 loops, but it would be a nightmare for readability for (int jy = 0; jy <= hR+hL; jy++) { int idx_y = gidy*2 - c + jy; if (idx_y < 0) idx_y += (Nr + Nr_is_odd); // if N is odd, image is virtually extended // no "else if", since idx_y can be > N-1 after being incremented if (idx_y > Nr-1) { if ((idx_y == Nr) && (Nr_is_odd)) idx_y--; // if N is odd, repeat the right-most element else idx_y -= (Nr + Nr_is_odd); // if N is odd, image is virtually extended } for (int jx = 0; jx <= hR+hL; jx++) { int idx_x = gidx*2 - c + jx; if (idx_x < 0) idx_x += (Nc + Nc_is_odd); // if N is odd, image is virtually extended // no "else if", since idx_x can be > N-1 after being incremented if (idx_x > Nc-1) { if ((idx_x == Nc) && (Nc_is_odd)) idx_x--; // if N is odd, repeat the right-most element else idx_x -= (Nc + Nc_is_odd); // if N is odd, image is virtually extended } img_val = img[idx_y*Nc + idx_x]; res_a += img_val * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_h += img_val * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_v += img_val * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_d += img_val * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)]; } } c_a[gidy* Nc2 + gidx] = res_a; c_h[gidy* Nc2 + gidx] = res_h; c_v[gidy* Nc2 + gidx] = res_v; c_d[gidy* Nc2 + gidx] = res_d; } } // must be run with grid size = (2*Nr, 2*Nc) ; Nr = numrows of input __global__ void w_kern_inverse(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int Nr2, int Nc2, int hlen) { int gidx = threadIdx.x + blockIdx.x*blockDim.x; int gidy = threadIdx.y + blockIdx.y*blockDim.y; if (gidy < Nr2 && gidx < Nc2) { int c, hL, hR; int hlen2 = hlen/2; // Convolutions with even/odd indices of the kernels if (hlen2 & 1) { // odd half-kernel size c = hlen2/2; hL = c; hR = c; } else { // even half-kernel size : center is shifted to the RIGHT for reconstruction. c = hlen2/2 - 0; hL = c; hR = c-1; // virtual id for shift // TODO : for the very first convolution (on the edges), this is not exactly accurate (?) gidx += 1; gidy += 1; } int jy1 = c - gidy/2; int jy2 = Nr - 1 - gidy/2 + c; int jx1 = c - gidx/2; int jx2 = Nc - 1 - gidx/2 + c; // There are 4 threads/coeff index. Each thread will do a convolution with the even/odd indices of the kernels along each dimension. 
int offset_x = 1-(gidx & 1); int offset_y = 1-(gidy & 1); DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0; for (int jy = 0; jy <= hR+hL; jy++) { int idx_y = gidy/2 - c + jy; if (jy < jy1) idx_y += Nr; if (jy > jy2) idx_y -= Nr; for (int jx = 0; jx <= hR+hL; jx++) { int idx_x = gidx/2 - c + jx; if (jx < jx1) idx_x += Nc; if (jx > jx2) idx_x -= Nc; res_a += c_a[idx_y*Nc + idx_x] * c_kern_LL[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))]; res_h += c_h[idx_y*Nc + idx_x] * c_kern_LH[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))]; res_v += c_v[idx_y*Nc + idx_x] * c_kern_HL[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))]; res_d += c_d[idx_y*Nc + idx_x] * c_kern_HH[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))]; } } if ((hlen2 & 1) == 1) img[gidy * Nc2 + gidx] = res_a + res_h + res_v + res_d; else img[(gidy-1) * Nc2 + (gidx-1)] = res_a + res_h + res_v + res_d; } } int w_forward(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) { int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen; int tpb = 16; // TODO : tune for max perfs. int Nc2 = Nc, Nr2 = Nr; int Nc2_old = Nc2, Nr2_old = Nr2; w_div2(&Nr2); w_div2(&Nc2); DTYPE* d_tmp1, *d_tmp2; d_tmp1 = d_coeffs[0]; d_tmp2 = d_tmp; // First level dim3 n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1); dim3 n_threads_per_block = dim3(tpb, tpb, 1); hipLaunchKernelGGL(( w_kern_forward), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen); for (int i=1; i < levels; i++) { Nr2_old = Nr2; Nc2_old = Nc2; w_div2(&Nr2); w_div2(&Nc2); n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1); hipLaunchKernelGGL(( w_kern_forward), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr2_old, Nc2_old, hlen); w_swap_ptr(&d_tmp1, &d_tmp2); } if ((levels > 1) && ((levels & 1) == 0)) hipMemcpy(d_coeffs[0], d_tmp, Nr2*Nc2*sizeof(DTYPE), hipMemcpyDeviceToDevice); return 0; } int w_inverse(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) { int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen; // Table of sizes. FIXME: consider adding this in the w_info structure int tNr[levels+1]; tNr[0] = Nr; int tNc[levels+1]; tNc[0] = Nc; for (int i = 1; i <= levels; i++) { tNr[i] = tNr[i-1]; tNc[i] = tNc[i-1]; w_div2(tNr + i); w_div2(tNc + i); } int tpb = 16; // TODO : tune for max perfs. 
DTYPE* d_tmp1, *d_tmp2; d_tmp1 = d_coeffs[0]; d_tmp2 = d_tmp; dim3 n_threads_per_block = dim3(tpb, tpb, 1); dim3 n_blocks; for (int i = levels-1; i >= 1; i--) { n_blocks = dim3(w_iDivUp(tNc[i], tpb), w_iDivUp(tNr[i], tpb), 1); hipLaunchKernelGGL(( w_kern_inverse), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], tNr[i+1], tNc[i+1], tNr[i], tNc[i], hlen); w_swap_ptr(&d_tmp1, &d_tmp2); } if ((levels > 1) && ((levels & 1) == 0)) hipMemcpy(d_coeffs[0], d_tmp, tNr[1]*tNc[1]*sizeof(DTYPE), hipMemcpyDeviceToDevice); // First level n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1); hipLaunchKernelGGL(( w_kern_inverse), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], tNr[1], tNc[1], Nr, Nc, hlen); return 0; } /// ---------------------------------------------------------------------------- /// ------------------------- Undecimated DWT -------------------------------- /// ---------------------------------------------------------------------------- // must be run with grid size = (Nc, Nr) where Nr = numrows of input image __global__ void w_kern_forward_swt(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen, int level) { int gidx = threadIdx.x + blockIdx.x*blockDim.x; int gidy = threadIdx.y + blockIdx.y*blockDim.y; if (gidy < Nr && gidx < Nc) { int factor = 1 << (level - 1); int c, hL, hR; if (hlen & 1) { // odd kernel size c = hlen/2; hL = c; hR = c; } else { // even kernel size : center is shifted to the left c = hlen/2 - 1; hL = c; hR = c+1; } c *= factor; int jx1 = c - gidx; int jx2 = Nc - 1 - gidx + c; int jy1 = c - gidy; int jy2 = Nr - 1 - gidy + c; DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0; DTYPE img_val; // Convolution with periodic boundaries extension. // The filters are 2-upsampled at each level : [h0, h1, h2, h3] --> [h0, 0, h1, 0, h2, 0, h3, 0] for (int jy = 0; jy <= hR+hL; jy++) { int idx_y = gidy - c + factor*jy; if (factor*jy < jy1) idx_y += Nr; if (factor*jy > jy2) idx_y -= Nr; for (int jx = 0; jx <= hR+hL; jx++) { int idx_x = gidx + jx*factor - c; if (factor*jx < jx1) idx_x += Nc; if (factor*jx > jx2) idx_x -= Nc; img_val = img[idx_y*Nc + idx_x]; res_a += img_val * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_h += img_val * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_v += img_val * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_d += img_val * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)]; } } c_a[gidy* Nc + gidx] = res_a; c_h[gidy* Nc + gidx] = res_h; c_v[gidy* Nc + gidx] = res_v; c_d[gidy* Nc + gidx] = res_d; } } // must be run with grid size = (2*Nr, 2*Nc) ; Nr = numrows of input __global__ void w_kern_inverse_swt(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen, int level) { int gidx = threadIdx.x + blockIdx.x*blockDim.x; int gidy = threadIdx.y + blockIdx.y*blockDim.y; if (gidy < Nr && gidx < Nc) { int factor = 1 << (level - 1); int c, hL, hR; if (hlen & 1) { // odd half-kernel size c = hlen/2; hL = c; hR = c; } else { // even half-kernel size : center is shifted to the RIGHT for reconstruction. 
c = hlen/2 - 0; hL = c; hR = c-1; } c *= factor; int jy1 = c - gidy; int jy2 = Nr - 1 - gidy + c; int jx1 = c - gidx; int jx2 = Nc - 1 - gidx + c; DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0; for (int jy = 0; jy <= hR+hL; jy++) { int idx_y = gidy - c + jy*factor; if (factor*jy < jy1) idx_y += Nr; if (factor*jy > jy2) idx_y -= Nr; for (int jx = 0; jx <= hR+hL; jx++) { int idx_x = gidx - c + jx*factor; if (factor*jx < jx1) idx_x += Nc; if (factor*jx > jx2) idx_x -= Nc; res_a += c_a[idx_y*Nc + idx_x] * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4; res_h += c_h[idx_y*Nc + idx_x] * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4; res_v += c_v[idx_y*Nc + idx_x] * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4; res_d += c_d[idx_y*Nc + idx_x] * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4; } } img[gidy * Nc + gidx] = res_a + res_h + res_v + res_d; } } int w_forward_swt(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) { int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen; DTYPE* d_tmp1, *d_tmp2; d_tmp1 = d_coeffs[0]; d_tmp2 = d_tmp; // First level int tpb = 16; // TODO : tune for max perfs. dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1); dim3 n_threads_per_block = dim3(tpb, tpb, 1); hipLaunchKernelGGL(( w_kern_forward_swt), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen, 1); for (int i=1; i < levels; i++) { hipLaunchKernelGGL(( w_kern_forward_swt), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen, i+1); w_swap_ptr(&d_tmp1, &d_tmp2); } if ((levels & 1) == 0) hipMemcpy(d_coeffs[0], d_tmp, Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice); return 0; } int w_inverse_swt(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) { int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen; DTYPE* d_tmp1, *d_tmp2; d_tmp1 = d_coeffs[0]; d_tmp2 = d_tmp; int tpb = 16; // TODO : tune for max perfs. dim3 n_threads_per_block = dim3(tpb, tpb, 1); dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1); for (int i = levels-1; i >= 1; i--) { hipLaunchKernelGGL(( w_kern_inverse_swt), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen, i+1); w_swap_ptr(&d_tmp1, &d_tmp2); } if ((levels & 1) == 0) hipMemcpy(d_coeffs[0], d_tmp, Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice); // First scale n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1); hipLaunchKernelGGL(( w_kern_inverse_swt), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen, 1); return 0; }
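// ----------------------------------------------------------------------------
// [Editorial sketch, not part of the original file] w_kern_forward above extends
// the image periodically and, when a dimension is odd, virtually repeats the
// right-most element once so the signal behaves as if it had even length. The
// same index rule, written as a host helper (the name wrap_index_periodic is
// illustrative only):
static int wrap_index_periodic(int idx, int N)
{
    int N_is_odd = (N & 1);
    if (idx < 0) idx += (N + N_is_odd);          // wrap from the left
    if (idx > N - 1) {
        if ((idx == N) && N_is_odd) idx--;       // repeat the right-most element
        else idx -= (N + N_is_odd);              // wrap from the right
    }
    return idx;
}
// ----------------------------------------------------------------------------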
28499906445505eaed34b64c42eccdd6f86e3ede.cu
#include "nonseparable.h" #include "common.h" #ifdef SEPARATE_COMPILATION // Required for separate compilation (see Makefile) #ifndef CONSTMEM_FILTERS_NS #define CONSTMEM_FILTERS_NS __constant__ DTYPE c_kern_LL[MAX_FILTER_WIDTH * MAX_FILTER_WIDTH]; __constant__ DTYPE c_kern_LH[MAX_FILTER_WIDTH * MAX_FILTER_WIDTH]; __constant__ DTYPE c_kern_HL[MAX_FILTER_WIDTH * MAX_FILTER_WIDTH]; __constant__ DTYPE c_kern_HH[MAX_FILTER_WIDTH * MAX_FILTER_WIDTH]; #endif #endif // outer product of arrays "a", "b" of length "len" DTYPE* w_outer(DTYPE* a, DTYPE* b, int len) { DTYPE* res = (DTYPE*) calloc(len*len, sizeof(DTYPE)); for (int i = 0; i < len; i++) { for (int j = 0; j < len; j++) { res[i*len+j] = a[i]*b[j]; } } return res; } /// Compute the four filters A, H, V, D from a family name. /// These filters are separable, i.e computed from 1D filters. /// wname: name of the filter ("haar", "db3", "sym4", ...) /// direction: 1 for forward transform, -1 for inverse transform /// Returns : the filter width "hlen" if success ; a negative value otherwise. int w_compute_filters(const char* wname, int direction, int do_swt) { if (direction == 0) { puts("ERROR: w_compute_filters(): please specify a direction for second argument : +1 for forward, -1 for inverse)"); return -1; } int hlen = 0; DTYPE* f1_l; // 1D lowpass DTYPE* f1_h; // 1D highpass DTYPE* f2_a, *f2_h, *f2_v, *f2_d; // 2D filters // Haar filters has specific kernels if (!do_swt) { if ((!strcasecmp(wname, "haar")) || (!strcasecmp(wname, "db1")) || (!strcasecmp(wname, "bior1.1")) || (!strcasecmp(wname, "rbior1.1"))) { return 2; } } // Browse available filters (see filters.h) int i; for (i = 0; i < 72; i++) { if (!strcasecmp(wname, all_filters[i].wname)) { hlen = all_filters[i].hlen; if (direction > 0) { f1_l = all_filters[i].f_l; f1_h = all_filters[i].f_h; } else { f1_l = all_filters[i].i_l; f1_h = all_filters[i].i_h; } break; } } if (hlen == 0) { printf("ERROR: w_compute_filters(): unknown filter %s\n", wname); return -2; } // Create the separable 2D filters f2_a = w_outer(f1_l, f1_l, hlen); f2_h = w_outer(f1_l, f1_h, hlen); // CHECKME f2_v = w_outer(f1_h, f1_l, hlen); f2_d = w_outer(f1_h, f1_h, hlen); // Copy the filters to device constant memory cudaMemcpyToSymbol(c_kern_LL, f2_a, hlen*hlen*sizeof(DTYPE), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_kern_LH, f2_h, hlen*hlen*sizeof(DTYPE), 0, cudaMemcpyHostToDevice); // CHECKME cudaMemcpyToSymbol(c_kern_HL, f2_v, hlen*hlen*sizeof(DTYPE), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_kern_HH, f2_d, hlen*hlen*sizeof(DTYPE), 0, cudaMemcpyHostToDevice); return hlen; } int w_set_filters_forward_nonseparable(DTYPE* filter1, DTYPE* filter2, DTYPE* filter3, DTYPE* filter4, uint len) { if (cudaMemcpyToSymbol(c_kern_LL, filter1, len*len*sizeof(DTYPE), 0, cudaMemcpyHostToDevice) != cudaSuccess || cudaMemcpyToSymbol(c_kern_LH, filter2, len*len*sizeof(DTYPE), 0, cudaMemcpyHostToDevice) != cudaSuccess || cudaMemcpyToSymbol(c_kern_HL, filter3, len*len*sizeof(DTYPE), 0, cudaMemcpyHostToDevice) != cudaSuccess || cudaMemcpyToSymbol(c_kern_HH, filter4, len*len*sizeof(DTYPE), 0, cudaMemcpyHostToDevice) != cudaSuccess) { return -3; } return 0; } int w_set_filters_inverse_nonseparable(DTYPE* filter1, DTYPE* filter2, DTYPE* filter3, DTYPE* filter4, uint len) { if (cudaMemcpyToSymbol(c_kern_LL, filter1, len*len*sizeof(DTYPE), 0, cudaMemcpyHostToDevice) != cudaSuccess || cudaMemcpyToSymbol(c_kern_LH, filter2, len*len*sizeof(DTYPE), 0, cudaMemcpyHostToDevice) != cudaSuccess || cudaMemcpyToSymbol(c_kern_HL, filter3, 
len*len*sizeof(DTYPE), 0, cudaMemcpyHostToDevice) != cudaSuccess || cudaMemcpyToSymbol(c_kern_HH, filter4, len*len*sizeof(DTYPE), 0, cudaMemcpyHostToDevice) != cudaSuccess) { return -3; } return 0; } // must be run with grid size = (Nc/2, Nr/2) where Nr = numrows of input image __global__ void w_kern_forward(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen) { int gidx = threadIdx.x + blockIdx.x*blockDim.x; int gidy = threadIdx.y + blockIdx.y*blockDim.y; int Nr_is_odd = (Nr & 1); int Nr2 = (Nr + Nr_is_odd)/2; int Nc_is_odd = (Nc & 1); int Nc2 = (Nc + Nc_is_odd)/2; if (gidy < Nr2 && gidx < Nc2) { int c, hL, hR; if (hlen & 1) { // odd kernel size c = hlen/2; hL = c; hR = c; } else { // even kernel size : center is shifted to the left c = hlen/2 - 1; hL = c; hR = c+1; } DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0; DTYPE img_val; // Convolution with periodic boundaries extension. // The following can be sped-up by splitting into 3*3 loops, but it would be a nightmare for readability for (int jy = 0; jy <= hR+hL; jy++) { int idx_y = gidy*2 - c + jy; if (idx_y < 0) idx_y += (Nr + Nr_is_odd); // if N is odd, image is virtually extended // no "else if", since idx_y can be > N-1 after being incremented if (idx_y > Nr-1) { if ((idx_y == Nr) && (Nr_is_odd)) idx_y--; // if N is odd, repeat the right-most element else idx_y -= (Nr + Nr_is_odd); // if N is odd, image is virtually extended } for (int jx = 0; jx <= hR+hL; jx++) { int idx_x = gidx*2 - c + jx; if (idx_x < 0) idx_x += (Nc + Nc_is_odd); // if N is odd, image is virtually extended // no "else if", since idx_x can be > N-1 after being incremented if (idx_x > Nc-1) { if ((idx_x == Nc) && (Nc_is_odd)) idx_x--; // if N is odd, repeat the right-most element else idx_x -= (Nc + Nc_is_odd); // if N is odd, image is virtually extended } img_val = img[idx_y*Nc + idx_x]; res_a += img_val * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_h += img_val * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_v += img_val * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_d += img_val * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)]; } } c_a[gidy* Nc2 + gidx] = res_a; c_h[gidy* Nc2 + gidx] = res_h; c_v[gidy* Nc2 + gidx] = res_v; c_d[gidy* Nc2 + gidx] = res_d; } } // must be run with grid size = (2*Nr, 2*Nc) ; Nr = numrows of input __global__ void w_kern_inverse(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int Nr2, int Nc2, int hlen) { int gidx = threadIdx.x + blockIdx.x*blockDim.x; int gidy = threadIdx.y + blockIdx.y*blockDim.y; if (gidy < Nr2 && gidx < Nc2) { int c, hL, hR; int hlen2 = hlen/2; // Convolutions with even/odd indices of the kernels if (hlen2 & 1) { // odd half-kernel size c = hlen2/2; hL = c; hR = c; } else { // even half-kernel size : center is shifted to the RIGHT for reconstruction. c = hlen2/2 - 0; hL = c; hR = c-1; // virtual id for shift // TODO : for the very first convolution (on the edges), this is not exactly accurate (?) gidx += 1; gidy += 1; } int jy1 = c - gidy/2; int jy2 = Nr - 1 - gidy/2 + c; int jx1 = c - gidx/2; int jx2 = Nc - 1 - gidx/2 + c; // There are 4 threads/coeff index. Each thread will do a convolution with the even/odd indices of the kernels along each dimension. 
int offset_x = 1-(gidx & 1); int offset_y = 1-(gidy & 1); DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0; for (int jy = 0; jy <= hR+hL; jy++) { int idx_y = gidy/2 - c + jy; if (jy < jy1) idx_y += Nr; if (jy > jy2) idx_y -= Nr; for (int jx = 0; jx <= hR+hL; jx++) { int idx_x = gidx/2 - c + jx; if (jx < jx1) idx_x += Nc; if (jx > jx2) idx_x -= Nc; res_a += c_a[idx_y*Nc + idx_x] * c_kern_LL[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))]; res_h += c_h[idx_y*Nc + idx_x] * c_kern_LH[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))]; res_v += c_v[idx_y*Nc + idx_x] * c_kern_HL[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))]; res_d += c_d[idx_y*Nc + idx_x] * c_kern_HH[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))]; } } if ((hlen2 & 1) == 1) img[gidy * Nc2 + gidx] = res_a + res_h + res_v + res_d; else img[(gidy-1) * Nc2 + (gidx-1)] = res_a + res_h + res_v + res_d; } } int w_forward(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) { int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen; int tpb = 16; // TODO : tune for max perfs. int Nc2 = Nc, Nr2 = Nr; int Nc2_old = Nc2, Nr2_old = Nr2; w_div2(&Nr2); w_div2(&Nc2); DTYPE* d_tmp1, *d_tmp2; d_tmp1 = d_coeffs[0]; d_tmp2 = d_tmp; // First level dim3 n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1); dim3 n_threads_per_block = dim3(tpb, tpb, 1); w_kern_forward<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen); for (int i=1; i < levels; i++) { Nr2_old = Nr2; Nc2_old = Nc2; w_div2(&Nr2); w_div2(&Nc2); n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1); w_kern_forward<<<n_blocks, n_threads_per_block>>>(d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr2_old, Nc2_old, hlen); w_swap_ptr(&d_tmp1, &d_tmp2); } if ((levels > 1) && ((levels & 1) == 0)) cudaMemcpy(d_coeffs[0], d_tmp, Nr2*Nc2*sizeof(DTYPE), cudaMemcpyDeviceToDevice); return 0; } int w_inverse(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) { int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen; // Table of sizes. FIXME: consider adding this in the w_info structure int tNr[levels+1]; tNr[0] = Nr; int tNc[levels+1]; tNc[0] = Nc; for (int i = 1; i <= levels; i++) { tNr[i] = tNr[i-1]; tNc[i] = tNc[i-1]; w_div2(tNr + i); w_div2(tNc + i); } int tpb = 16; // TODO : tune for max perfs. 
DTYPE* d_tmp1, *d_tmp2; d_tmp1 = d_coeffs[0]; d_tmp2 = d_tmp; dim3 n_threads_per_block = dim3(tpb, tpb, 1); dim3 n_blocks; for (int i = levels-1; i >= 1; i--) { n_blocks = dim3(w_iDivUp(tNc[i], tpb), w_iDivUp(tNr[i], tpb), 1); w_kern_inverse<<<n_blocks, n_threads_per_block>>>(d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], tNr[i+1], tNc[i+1], tNr[i], tNc[i], hlen); w_swap_ptr(&d_tmp1, &d_tmp2); } if ((levels > 1) && ((levels & 1) == 0)) cudaMemcpy(d_coeffs[0], d_tmp, tNr[1]*tNc[1]*sizeof(DTYPE), cudaMemcpyDeviceToDevice); // First level n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1); w_kern_inverse<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], tNr[1], tNc[1], Nr, Nc, hlen); return 0; } /// ---------------------------------------------------------------------------- /// ------------------------- Undecimated DWT -------------------------------- /// ---------------------------------------------------------------------------- // must be run with grid size = (Nc, Nr) where Nr = numrows of input image __global__ void w_kern_forward_swt(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen, int level) { int gidx = threadIdx.x + blockIdx.x*blockDim.x; int gidy = threadIdx.y + blockIdx.y*blockDim.y; if (gidy < Nr && gidx < Nc) { int factor = 1 << (level - 1); int c, hL, hR; if (hlen & 1) { // odd kernel size c = hlen/2; hL = c; hR = c; } else { // even kernel size : center is shifted to the left c = hlen/2 - 1; hL = c; hR = c+1; } c *= factor; int jx1 = c - gidx; int jx2 = Nc - 1 - gidx + c; int jy1 = c - gidy; int jy2 = Nr - 1 - gidy + c; DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0; DTYPE img_val; // Convolution with periodic boundaries extension. // The filters are 2-upsampled at each level : [h0, h1, h2, h3] --> [h0, 0, h1, 0, h2, 0, h3, 0] for (int jy = 0; jy <= hR+hL; jy++) { int idx_y = gidy - c + factor*jy; if (factor*jy < jy1) idx_y += Nr; if (factor*jy > jy2) idx_y -= Nr; for (int jx = 0; jx <= hR+hL; jx++) { int idx_x = gidx + jx*factor - c; if (factor*jx < jx1) idx_x += Nc; if (factor*jx > jx2) idx_x -= Nc; img_val = img[idx_y*Nc + idx_x]; res_a += img_val * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_h += img_val * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_v += img_val * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)]; res_d += img_val * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)]; } } c_a[gidy* Nc + gidx] = res_a; c_h[gidy* Nc + gidx] = res_h; c_v[gidy* Nc + gidx] = res_v; c_d[gidy* Nc + gidx] = res_d; } } // must be run with grid size = (2*Nr, 2*Nc) ; Nr = numrows of input __global__ void w_kern_inverse_swt(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen, int level) { int gidx = threadIdx.x + blockIdx.x*blockDim.x; int gidy = threadIdx.y + blockIdx.y*blockDim.y; if (gidy < Nr && gidx < Nc) { int factor = 1 << (level - 1); int c, hL, hR; if (hlen & 1) { // odd half-kernel size c = hlen/2; hL = c; hR = c; } else { // even half-kernel size : center is shifted to the RIGHT for reconstruction. 
c = hlen/2 - 0; hL = c; hR = c-1; } c *= factor; int jy1 = c - gidy; int jy2 = Nr - 1 - gidy + c; int jx1 = c - gidx; int jx2 = Nc - 1 - gidx + c; DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0; for (int jy = 0; jy <= hR+hL; jy++) { int idx_y = gidy - c + jy*factor; if (factor*jy < jy1) idx_y += Nr; if (factor*jy > jy2) idx_y -= Nr; for (int jx = 0; jx <= hR+hL; jx++) { int idx_x = gidx - c + jx*factor; if (factor*jx < jx1) idx_x += Nc; if (factor*jx > jx2) idx_x -= Nc; res_a += c_a[idx_y*Nc + idx_x] * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4; res_h += c_h[idx_y*Nc + idx_x] * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4; res_v += c_v[idx_y*Nc + idx_x] * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4; res_d += c_d[idx_y*Nc + idx_x] * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4; } } img[gidy * Nc + gidx] = res_a + res_h + res_v + res_d; } } int w_forward_swt(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) { int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen; DTYPE* d_tmp1, *d_tmp2; d_tmp1 = d_coeffs[0]; d_tmp2 = d_tmp; // First level int tpb = 16; // TODO : tune for max perfs. dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1); dim3 n_threads_per_block = dim3(tpb, tpb, 1); w_kern_forward_swt<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen, 1); for (int i=1; i < levels; i++) { w_kern_forward_swt<<<n_blocks, n_threads_per_block>>>(d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen, i+1); w_swap_ptr(&d_tmp1, &d_tmp2); } if ((levels & 1) == 0) cudaMemcpy(d_coeffs[0], d_tmp, Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice); return 0; } int w_inverse_swt(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) { int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen; DTYPE* d_tmp1, *d_tmp2; d_tmp1 = d_coeffs[0]; d_tmp2 = d_tmp; int tpb = 16; // TODO : tune for max perfs. dim3 n_threads_per_block = dim3(tpb, tpb, 1); dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1); for (int i = levels-1; i >= 1; i--) { w_kern_inverse_swt<<<n_blocks, n_threads_per_block>>>(d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen, i+1); w_swap_ptr(&d_tmp1, &d_tmp2); } if ((levels & 1) == 0) cudaMemcpy(d_coeffs[0], d_tmp, Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice); // First scale n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1); w_kern_inverse_swt<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen, 1); return 0; }
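// ----------------------------------------------------------------------------
// [Editorial sketch, not part of the original file] w_compute_filters builds the
// four 2D kernels LL/LH/HL/HH as outer products of the 1D low-pass/high-pass
// pair via w_outer. For the Haar pair (up to a sign convention on the high-pass
// filter) the outer products are easy to check by hand; the function name below
// is illustrative only.
static void example_haar_outer_products(void)
{
    DTYPE l[2] = { (DTYPE)0.70710678, (DTYPE)0.70710678 };   // Haar low-pass
    DTYPE h[2] = { (DTYPE)0.70710678, (DTYPE)-0.70710678 };  // Haar high-pass
    DTYPE *k_ll = w_outer(l, l, 2);   // {0.5, 0.5, 0.5, 0.5}
    DTYPE *k_hh = w_outer(h, h, 2);   // {0.5, -0.5, -0.5, 0.5}
    free(k_ll); free(k_hh);           // w_outer allocates with calloc
}
// ----------------------------------------------------------------------------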
9d4df3730b3b552b9dfc816c5d3d1d807b88e1f2.hip
// !!! This is a file automatically generated by hipify!!! // To compute histogram with atomic operations */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> // Variables float* data_h; // host vectors unsigned int* hist_h; float* data_d; // device vectors unsigned int* hist_d; unsigned int* hist_c; // CPU solution // Functions void RandomUniform(float*, long); void RandomNormal(float*, long); void RandomExp(float*, long); __global__ void hist_gmem(float *data, const long N, unsigned int *hist, const int bins, const float Rmin, const float binsize) { // use global memory and atomic addition long i = threadIdx.x + blockIdx.x * blockDim.x; long stride = blockDim.x * gridDim.x; // if( (index > bins-1) || (index < 0)) { // printf("data[%d]=%f, index=%d\n",i,data[i],index); // } while (i < N) { int index = (int)((data[i]-Rmin)/binsize); if( (index > bins-1) || (index < 0)) { index = bins - 1 ; } atomicAdd(&hist[index],1); i += stride; // goto the next grid } __syncthreads(); } int main(void) { int gid; // Error code to check return values for CUDA calls hipError_t err = hipSuccess; scanf("%d",&gid); err = hipSetDevice(gid); if (err != hipSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Set GPU with device ID = %d\n", gid); hipSetDevice(gid); printf("To find the histogram of a data set (with real numbers): \n"); long N; int bins,index; float Rmin, Rmax, binsize; printf("Enter the size of the data vector: "); scanf("%ld",&N); printf("%ld\n",N); long size = N * sizeof(float); printf("Enter the data range [Rmin, Rmax] for the histogram: "); scanf("%f %f",&Rmin, &Rmax); printf("%f %f\n",Rmin, Rmax); fflush(stdout); printf("Enter the number of bins of the histogram: "); scanf("%d",&bins); printf("%d\n",bins); fflush(stdout); int bsize = bins*sizeof(int); binsize = (Rmax - Rmin)/(float)bins; data_h = (float*)malloc(size); hist_h = (unsigned int*)malloc(bsize); // Check memory allocations if(data_h == NULL || hist_h == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } for(int i=0; i<bins; i++) hist_h[i]=0; // initialize the data_h vector // srand(time(NULL)); // initialize the seed with the current time srand(12345); printf("Starting to generate data by RNG\n"); fflush(stdout); // RandomUniform(data_h, N); // uniform deviate in (0,1) // RandomNormal(data_h, N); // Gaussian deviate with sigma = 1 RandomExp(data_h, N); // Exp printf("Finish the generaton of data\n"); fflush(stdout); int threadsPerBlock; printf("Enter the number of threads per block: "); scanf("%d",&threadsPerBlock); printf("%d\n",threadsPerBlock); fflush(stdout); if( threadsPerBlock > 1024 ) { printf("The number of threads per block must be less than 1024 ! \n"); exit(0); } int blocksPerGrid; printf("Enter the number of blocks per grid: "); scanf("%d",&blocksPerGrid); printf("%d\n",blocksPerGrid); if( blocksPerGrid > 2147483647 ) { printf("The number of blocks must be less than 2147483647 ! \n"); exit(0); } printf("The number of blocks is %d\n", blocksPerGrid); fflush(stdout); int CPU; printf("To compute the histogram with CPU (1/0) ? 
"); scanf("%d",&CPU); printf("%d\n",CPU); fflush(stdout); // create the timer hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // start the timer hipEventRecord(start,0); // Allocate vectors in device memory hipMalloc((void**)&hist_d, bsize); hipMalloc((void**)&data_d, size); // Copy vectors from host memory to device memory hipMemcpy(data_d, data_h, size, hipMemcpyHostToDevice); hipMemcpy(hist_d, hist_h, bsize, hipMemcpyHostToDevice); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float Intime; hipEventElapsedTime( &Intime, start, stop); printf("Input time for GPU: %f (ms) \n",Intime); // start the timer hipEventRecord(start,0); hipLaunchKernelGGL(( hist_gmem) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, data_d, N, hist_d, bins, Rmin, binsize); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float gputime; hipEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); printf("GPU Gflops: %f\n",2*N/(1000000.0*gputime)); // Copy result from device memory to host memory // hist_h contains the result in host memory // start the timer hipEventRecord(start,0); hipMemcpy(hist_h, hist_d, bsize, hipMemcpyDeviceToHost); hipFree(data_d); hipFree(hist_d); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float Outime; hipEventElapsedTime( &Outime, start, stop); printf("Output time for GPU: %f (ms) \n",Outime); float gputime_tot; gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); FILE *out; // save histogram in file out = fopen("hist_gmem.dat","w"); fprintf(out, "Histogram (GPU):\n"); for(int i=0; i<bins; i++) { float x=Rmin+(i+0.5)*binsize; // the center of each bin fprintf(out,"%f %d \n",x,hist_h[i]); } fclose(out); printf("Histogram (GPU):\n"); for(int i=0; i<bins; i++) { float x=Rmin+(i+0.5)*binsize; // the center of each bin printf("%f %d \n",x,hist_h[i]); } if(CPU==0) { hipEventDestroy(start); hipEventDestroy(stop); free(data_h); free(hist_h); return 0; } // To compute the CPU reference solution hist_c = (unsigned int*)malloc(bsize); for(int i=0; i<bins; i++) hist_c[i]=0; // start the timer hipEventRecord(start,0); for(int i=0; i<N; i++) { index = (int)((data_h[i]-Rmin)/binsize); if( (index > bins-1) || (index < 0)) { //printf("data[%d]=%f, index=%d\n",i,data_h[i],index); //exit(0); index = bins - 1; } hist_c[index]++; } // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float cputime; hipEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); printf("CPU Gflops: %f\n",2*N/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/(gputime_tot)); // destroy the timer hipEventDestroy(start); hipEventDestroy(stop); // check histogram sum equal to the total number of data int sum=0; for(int i=0; i<bins; i++) { sum += hist_c[i]; } if(sum != N) { printf("Error, sum = %d\n",sum); exit(0); } // compare histograms from CPU and GPU for (int i = 0; i < bins; i++) { if(hist_h[i] != hist_c[i]) printf("i=%d, hist_h=%d, hist_c=%d \n", i, hist_h[i], hist_c[i]); } FILE *out1; // save histogram in file out1 = fopen("hist_cpu.dat","w"); fprintf(out1, "Histogram (CPU):\n"); for(int i=0; i<bins; i++) { float x=Rmin+(i+0.5)*binsize; // the center of each bin fprintf(out1,"%f %d \n",x,hist_c[i]); } fclose(out1); printf("Histogram (CPU):\n"); for(int i=0; i<bins; i++) { float x=Rmin+(i+0.5)*binsize; // the center of each bin printf("%f %d \n",x,hist_c[i]); } free(data_h); free(hist_h); 
free(hist_c);

    return 0;
}

void RandomUniform(float* data, long n)      // RNG with uniform distribution in (0,1)
{
    for(long i = 0; i < n; i++)
        data[i] = rand()/(float)RAND_MAX;
}

void RandomNormal(float* data, long n)       // RNG with normal distribution, mu=0, sigma=1 (Box-Muller)
{
    const float Pi = acos(-1.0);
    for(long i = 0; i < n; i++) {
        double y = (double) rand() / (float)RAND_MAX;
        double x = -log(1.0-y);
        double z = (double) rand() / (float)RAND_MAX;
        double theta = 2*Pi*z;
        data[i] = (float) (sqrt(2.0*x)*cos(theta));
    }
}

void RandomExp(float* data, long n)          // RNG with exponential distribution, mean = 1
{
    for(long i = 0; i < n; i++) {
        double y = (double) rand() / (float)RAND_MAX;
        data[i] = (float) (-log(1 - y));
    }
}
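// ----------------------------------------------------------------------------
// [Editorial note, not part of the original file] RandomExp draws x = -log(1-u)
// with u uniform in [0,1), i.e. inverse-transform sampling of an Exp(1)
// distribution, so the expected count of bin [a, b) is N*(exp(-a) - exp(-b)).
// A small helper for sanity-checking the histogram (the name expected_exp_hist
// is illustrative only):
static void expected_exp_hist(double *expected, int bins, long N,
                              float Rmin, float binsize)
{
    for (int i = 0; i < bins; i++) {
        double a = Rmin + i * binsize;
        double b = a + binsize;
        expected[i] = N * (exp(-a) - exp(-b));   // P(a <= x < b) for Exp(1)
    }
    // hist_gmem clamps out-of-range samples into the last bin, so the observed
    // last bin also collects the tail beyond Rmax (and anything below Rmin).
}
// ----------------------------------------------------------------------------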
9d4df3730b3b552b9dfc816c5d3d1d807b88e1f2.cu
// To compute histogram with atomic operations */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <cuda_runtime.h> // Variables float* data_h; // host vectors unsigned int* hist_h; float* data_d; // device vectors unsigned int* hist_d; unsigned int* hist_c; // CPU solution // Functions void RandomUniform(float*, long); void RandomNormal(float*, long); void RandomExp(float*, long); __global__ void hist_gmem(float *data, const long N, unsigned int *hist, const int bins, const float Rmin, const float binsize) { // use global memory and atomic addition long i = threadIdx.x + blockIdx.x * blockDim.x; long stride = blockDim.x * gridDim.x; // if( (index > bins-1) || (index < 0)) { // printf("data[%d]=%f, index=%d\n",i,data[i],index); // } while (i < N) { int index = (int)((data[i]-Rmin)/binsize); if( (index > bins-1) || (index < 0)) { index = bins - 1 ; } atomicAdd(&hist[index],1); i += stride; // goto the next grid } __syncthreads(); } int main(void) { int gid; // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; scanf("%d",&gid); err = cudaSetDevice(gid); if (err != cudaSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Set GPU with device ID = %d\n", gid); cudaSetDevice(gid); printf("To find the histogram of a data set (with real numbers): \n"); long N; int bins,index; float Rmin, Rmax, binsize; printf("Enter the size of the data vector: "); scanf("%ld",&N); printf("%ld\n",N); long size = N * sizeof(float); printf("Enter the data range [Rmin, Rmax] for the histogram: "); scanf("%f %f",&Rmin, &Rmax); printf("%f %f\n",Rmin, Rmax); fflush(stdout); printf("Enter the number of bins of the histogram: "); scanf("%d",&bins); printf("%d\n",bins); fflush(stdout); int bsize = bins*sizeof(int); binsize = (Rmax - Rmin)/(float)bins; data_h = (float*)malloc(size); hist_h = (unsigned int*)malloc(bsize); // Check memory allocations if(data_h == NULL || hist_h == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } for(int i=0; i<bins; i++) hist_h[i]=0; // initialize the data_h vector // srand(time(NULL)); // initialize the seed with the current time srand(12345); printf("Starting to generate data by RNG\n"); fflush(stdout); // RandomUniform(data_h, N); // uniform deviate in (0,1) // RandomNormal(data_h, N); // Gaussian deviate with sigma = 1 RandomExp(data_h, N); // Exp printf("Finish the generaton of data\n"); fflush(stdout); int threadsPerBlock; printf("Enter the number of threads per block: "); scanf("%d",&threadsPerBlock); printf("%d\n",threadsPerBlock); fflush(stdout); if( threadsPerBlock > 1024 ) { printf("The number of threads per block must be less than 1024 ! \n"); exit(0); } int blocksPerGrid; printf("Enter the number of blocks per grid: "); scanf("%d",&blocksPerGrid); printf("%d\n",blocksPerGrid); if( blocksPerGrid > 2147483647 ) { printf("The number of blocks must be less than 2147483647 ! \n"); exit(0); } printf("The number of blocks is %d\n", blocksPerGrid); fflush(stdout); int CPU; printf("To compute the histogram with CPU (1/0) ? 
"); scanf("%d",&CPU); printf("%d\n",CPU); fflush(stdout); // create the timer cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // start the timer cudaEventRecord(start,0); // Allocate vectors in device memory cudaMalloc((void**)&hist_d, bsize); cudaMalloc((void**)&data_d, size); // Copy vectors from host memory to device memory cudaMemcpy(data_d, data_h, size, cudaMemcpyHostToDevice); cudaMemcpy(hist_d, hist_h, bsize, cudaMemcpyHostToDevice); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float Intime; cudaEventElapsedTime( &Intime, start, stop); printf("Input time for GPU: %f (ms) \n",Intime); // start the timer cudaEventRecord(start,0); hist_gmem <<< blocksPerGrid, threadsPerBlock >>> (data_d, N, hist_d, bins, Rmin, binsize); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float gputime; cudaEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); printf("GPU Gflops: %f\n",2*N/(1000000.0*gputime)); // Copy result from device memory to host memory // hist_h contains the result in host memory // start the timer cudaEventRecord(start,0); cudaMemcpy(hist_h, hist_d, bsize, cudaMemcpyDeviceToHost); cudaFree(data_d); cudaFree(hist_d); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float Outime; cudaEventElapsedTime( &Outime, start, stop); printf("Output time for GPU: %f (ms) \n",Outime); float gputime_tot; gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); FILE *out; // save histogram in file out = fopen("hist_gmem.dat","w"); fprintf(out, "Histogram (GPU):\n"); for(int i=0; i<bins; i++) { float x=Rmin+(i+0.5)*binsize; // the center of each bin fprintf(out,"%f %d \n",x,hist_h[i]); } fclose(out); printf("Histogram (GPU):\n"); for(int i=0; i<bins; i++) { float x=Rmin+(i+0.5)*binsize; // the center of each bin printf("%f %d \n",x,hist_h[i]); } if(CPU==0) { cudaEventDestroy(start); cudaEventDestroy(stop); free(data_h); free(hist_h); return 0; } // To compute the CPU reference solution hist_c = (unsigned int*)malloc(bsize); for(int i=0; i<bins; i++) hist_c[i]=0; // start the timer cudaEventRecord(start,0); for(int i=0; i<N; i++) { index = (int)((data_h[i]-Rmin)/binsize); if( (index > bins-1) || (index < 0)) { //printf("data[%d]=%f, index=%d\n",i,data_h[i],index); //exit(0); index = bins - 1; } hist_c[index]++; } // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float cputime; cudaEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); printf("CPU Gflops: %f\n",2*N/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/(gputime_tot)); // destroy the timer cudaEventDestroy(start); cudaEventDestroy(stop); // check histogram sum equal to the total number of data int sum=0; for(int i=0; i<bins; i++) { sum += hist_c[i]; } if(sum != N) { printf("Error, sum = %d\n",sum); exit(0); } // compare histograms from CPU and GPU for (int i = 0; i < bins; i++) { if(hist_h[i] != hist_c[i]) printf("i=%d, hist_h=%d, hist_c=%d \n", i, hist_h[i], hist_c[i]); } FILE *out1; // save histogram in file out1 = fopen("hist_cpu.dat","w"); fprintf(out1, "Histogram (CPU):\n"); for(int i=0; i<bins; i++) { float x=Rmin+(i+0.5)*binsize; // the center of each bin fprintf(out1,"%f %d \n",x,hist_c[i]); } fclose(out1); printf("Histogram (CPU):\n"); for(int i=0; i<bins; i++) { float x=Rmin+(i+0.5)*binsize; // the center of each bin printf("%f %d \n",x,hist_c[i]); } free(data_h); free(hist_h); 
free(hist_c);

    return 0;
}

void RandomUniform(float* data, long n)      // RNG with uniform distribution in (0,1)
{
    for(long i = 0; i < n; i++)
        data[i] = rand()/(float)RAND_MAX;
}

void RandomNormal(float* data, long n)       // RNG with normal distribution, mu=0, sigma=1 (Box-Muller)
{
    const float Pi = acos(-1.0);
    for(long i = 0; i < n; i++) {
        double y = (double) rand() / (float)RAND_MAX;
        double x = -log(1.0-y);
        double z = (double) rand() / (float)RAND_MAX;
        double theta = 2*Pi*z;
        data[i] = (float) (sqrt(2.0*x)*cos(theta));
    }
}

void RandomExp(float* data, long n)          // RNG with exponential distribution, mean = 1
{
    for(long i = 0; i < n; i++) {
        double y = (double) rand() / (float)RAND_MAX;
        data[i] = (float) (-log(1 - y));
    }
}
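// ----------------------------------------------------------------------------
// [Editorial sketch, not part of the original file] hist_gmem accumulates with
// atomicAdd directly on global memory. A common alternative (shown only as an
// illustration; the kernel name hist_smem is an assumption, not part of this
// program) first builds a per-block histogram in shared memory and merges it
// once per block, which reduces contention on the global counters when bins is
// small:
__global__ void hist_smem(const float *data, const long N, unsigned int *hist,
                          const int bins, const float Rmin, const float binsize)
{
    extern __shared__ unsigned int s_hist[];                 // one counter per bin
    for (int b = threadIdx.x; b < bins; b += blockDim.x) s_hist[b] = 0;
    __syncthreads();
    for (long i = threadIdx.x + (long)blockIdx.x * blockDim.x; i < N;
         i += (long)blockDim.x * gridDim.x) {
        int index = (int)((data[i] - Rmin) / binsize);
        if ((index > bins - 1) || (index < 0)) index = bins - 1;
        atomicAdd(&s_hist[index], 1);                        // block-local update
    }
    __syncthreads();
    for (int b = threadIdx.x; b < bins; b += blockDim.x)
        atomicAdd(&hist[b], s_hist[b]);                      // merge into global
}
// A launch would pass bins*sizeof(unsigned int) bytes of dynamic shared memory:
// hist_smem<<<blocksPerGrid, threadsPerBlock, bins*sizeof(unsigned int)>>>(
//     data_d, N, hist_d, bins, Rmin, binsize);
// ----------------------------------------------------------------------------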
bda04a884e1a7d587789b58aaae80de1c4d0abca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dpra_cudaf.h" #include "dpra_general.cuh" #include <fstream> // TODO namespace DPRA{ /*---------------------------------------CUDA Kernels----------------------------------*/ __global__ void Gaussian_Elimination_3x3_kernel(const float *in_A, float *out_b, int iSize) { float A[3][4]; // The augmented matrix for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < iSize; i += blockDim.x * gridDim.x) { int idA = i * 9; // Index in Mat A int idb = i * 3; // Index in Vec b // Load values from A&b to the augmented matrix A per thread A[0][0] = 9; A[0][1] = in_A[idA + 3]; A[0][2] = in_A[idA + 6]; A[0][3] = out_b[idb + 0]; A[1][0] = A[0][1]; A[1][1] = in_A[idA + 4]; A[1][2] = in_A[idA + 7]; A[1][3] = out_b[idb + 1]; A[2][0] = A[0][2]; A[2][1] = A[1][2]; A[2][2] = in_A[idA + 8]; A[2][3] = out_b[idb + 2]; // Gaussian Elimination with partial pivoting algorithm for (int k = 0; k < 3; k++) { // 1. Find the i-th pivot of the following A[k][i] elements int i_max = -1; float i_pivot = 0.0f; for (int i = k; i < 3; i++) { if (fabsf(i_pivot) - fabsf(A[i][k]) <= 1e-6) { i_pivot = A[i][k]; i_max = i; } } // 2. swap rows for (int j = 0; j < 4; j++) { float temp = A[i_max][j]; A[i_max][j] = A[k][j]; A[k][j] = temp; } // 3. Triangulate the matrix for (int i = k + 1; i < 3; i++) { float mult = A[i][k] / A[k][k]; for (int j = 0; j < 4; j++) { A[i][j] = A[i][j] - A[k][j] * mult; } } } // 4. Find the solution using backward substitution method A[2][3] = A[2][3] / A[2][2]; A[1][3] = (A[1][3] - A[2][3] * A[1][2]) / A[1][1]; A[0][3] = (A[0][3] - A[2][3] * A[0][2] - A[1][3] * A[0][1]) / A[0][0]; // 5. Wirte the results back to out_b out_b[idb + 0] = A[0][3]; out_b[idb + 1] = A[1][3]; out_b[idb + 2] = A[2][3]; } } __global__ void Update_Delta_Phi_Kernel(const float *in_b, const int iSize, hipfftComplex *out_deltaPhiWFT) { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < iSize; i += gridDim.x * blockDim.x) { int idb = i * 3; float fDeltaPhi = atan2f(-in_b[idb + 2], in_b[idb + 1]); out_deltaPhiWFT[i].x = cosf(fDeltaPhi); out_deltaPhiWFT[i].y = sinf(fDeltaPhi); } } /*--------------------------------------End CUDA Kernels--------------------------------*/ DPRA_CUDAF::DPRA_CUDAF(const float *v_Phi0, const int iWidth, const int iHeight, const int irefUpdateRate) : m_iImgWidth(iWidth) , m_iImgHeight(iHeight) , m_iPaddedHeight(iHeight + 2) , m_iPaddedWidth(iWidth + 2) , m_rr(irefUpdateRate) , m_h_deltaPhi(nullptr) , m_d_PhiRef(nullptr) , m_d_PhiCurr(nullptr) , m_d_deltaPhiRef(nullptr) , m_d_deltaPhi(nullptr) , m_d_A(nullptr) , m_d_b(nullptr) , m_d_cosPhi(nullptr) , m_d_sinPhi(nullptr) , m_h_img(nullptr) , m_d_img(nullptr) , m_d_img_Padded(nullptr) , m_d_WFT(iWidth, iHeight, WFT_FPA::WFT::WFT_TYPE::WFF, 20, -0.15f, 0.15f, 0.05f, 20, -0.15f, 0.15f, 0.05f, 15, m_d_z, 1) , m_d_deltaPhi_WFT(nullptr) , m_threads2D(BLOCK_SIZE_16, BLOCK_SIZE_16) , m_blocks_2Dshrunk((int)ceil((float)m_iPaddedWidth / (BLOCK_SIZE_16 - 2)), (int)ceil((float)m_iPaddedHeight / (BLOCK_SIZE_16 - 2))) , m_blocks_2D((m_iPaddedWidth + BLOCK_SIZE_16 - 1) / BLOCK_SIZE_16, (m_iPaddedHeight + BLOCK_SIZE_16 - 1) / BLOCK_SIZE_16) { int iImgSize = m_iImgWidth * m_iImgHeight; int iPaddedSize = m_iPaddedHeight * m_iPaddedWidth; // Allocate host pinned memory WFT_FPA::Utils::cucreateptr(m_h_img, iImgSize); WFT_FPA::Utils::cucreateptr(m_h_deltaPhi, iImgSize); // Copy the d_Phi0 to local device array checkCudaErrors(hipMalloc((void**)&m_d_PhiRef, sizeof(float)*iImgSize)); 
// Allocate memory checkCudaErrors(hipMalloc((void**)&m_d_cosPhi, sizeof(float)*iPaddedSize)); checkCudaErrors(hipMalloc((void**)&m_d_sinPhi, sizeof(float)*iPaddedSize)); checkCudaErrors(hipMalloc((void**)&m_d_img_Padded, sizeof(uchar)*iPaddedSize)); checkCudaErrors(hipMalloc((void**)&m_d_A, sizeof(float) * 9 * iImgSize)); checkCudaErrors(hipMalloc((void**)&m_d_b, sizeof(float) * 3 * iImgSize)); checkCudaErrors(hipMalloc((void**)&m_d_img, sizeof(uchar)*iImgSize)); checkCudaErrors(hipMalloc((void**)&m_d_PhiCurr, sizeof(float)*iImgSize)); checkCudaErrors(hipMalloc((void**)&m_d_deltaPhi_WFT, sizeof(hipfftComplex) * iImgSize)); checkCudaErrors(hipMalloc((void**)&m_d_deltaPhi, sizeof(float)*iImgSize)); // Copy the initial v_Phi0 to local device array checkCudaErrors(hipMalloc((void**)&m_d_PhiRef, sizeof(float)*iImgSize)); checkCudaErrors(hipMemcpy(m_d_PhiRef, v_Phi0, sizeof(float)*iImgSize, hipMemcpyHostToDevice)); // Initialize the reference delta phi to 0's checkCudaErrors(hipMalloc((void**)&m_d_deltaPhiRef, sizeof(float)*iImgSize)); WFT_FPA::Utils::cuInitialize<float>(m_d_deltaPhiRef, 0, iImgSize); // Create CUDA event used for timing and synchronizing checkCudaErrors(hipEventCreate(&m_d_event_start)); checkCudaErrors(hipEventCreate(&m_d_event_1)); checkCudaErrors(hipEventCreate(&m_d_event_2)); checkCudaErrors(hipEventCreate(&m_d_event_3)); checkCudaErrors(hipEventCreate(&m_d_event_4)); checkCudaErrors(hipEventCreate(&m_d_event_5)); checkCudaErrors(hipEventCreate(&m_d_event_6)); checkCudaErrors(hipEventCreate(&m_d_event_7)); } DPRA_CUDAF::~DPRA_CUDAF() { checkCudaErrors(hipEventDestroy(m_d_event_start)); checkCudaErrors(hipEventDestroy(m_d_event_1)); checkCudaErrors(hipEventDestroy(m_d_event_2)); checkCudaErrors(hipEventDestroy(m_d_event_3)); checkCudaErrors(hipEventDestroy(m_d_event_4)); checkCudaErrors(hipEventDestroy(m_d_event_5)); checkCudaErrors(hipEventDestroy(m_d_event_6)); checkCudaErrors(hipEventDestroy(m_d_event_7)); WFT_FPA::Utils::cudaSafeFree(m_d_PhiRef); WFT_FPA::Utils::cudaSafeFree(m_d_PhiCurr); WFT_FPA::Utils::cudaSafeFree(m_d_deltaPhiRef); WFT_FPA::Utils::cudaSafeFree(m_d_A); WFT_FPA::Utils::cudaSafeFree(m_d_b); WFT_FPA::Utils::cudaSafeFree(m_d_deltaPhi); WFT_FPA::Utils::cudaSafeFree(m_d_deltaPhi_WFT); WFT_FPA::Utils::cudaSafeFree(m_d_cosPhi); WFT_FPA::Utils::cudaSafeFree(m_d_sinPhi); WFT_FPA::Utils::cudaSafeFree(m_d_img); WFT_FPA::Utils::cudaSafeFree(m_d_img_Padded); WFT_FPA::Utils::cudestroyptr(m_h_img); WFT_FPA::Utils::cudestroyptr(m_h_deltaPhi); } void DPRA_CUDAF::operator() (const std::vector<cv::cuda::HostMem> &f, std::vector<std::vector<float>> &dPhi_Sum, double &time) { } void DPRA_CUDAF::operator() (const std::vector<std::string> &fileNames, std::vector<std::vector<float>> &dPhi_Sum, double &time) { } void DPRA_CUDAF::dpra_per_frame(const cv::Mat &img, std::vector<float> &dPhi, double &time) { int iSize = m_iImgWidth * m_iImgHeight; int iPaddedSize = m_iPaddedWidth * m_iPaddedHeight; /* I/O */ memcpy(m_h_img, img.data, sizeof(uchar)*iSize); /* Per-frame algorithm starts here */ hipEventRecord(m_d_event_start); /* 1. Load the image f into device padded memory */ checkCudaErrors(hipMemcpyAsync(m_d_img, m_h_img, sizeof(uchar)*iSize, hipMemcpyHostToDevice)); load_img_padding(m_d_img_Padded, m_d_img, m_iImgWidth, m_iImgHeight, m_iPaddedWidth, m_iPaddedHeight, m_blocks_2D, m_threads2D); hipEventRecord(m_d_event_1); /* 2. 
construct matrix A and vector b on GPU */ compute_cosPhi_sinPhi(m_d_cosPhi, m_d_sinPhi, m_d_PhiRef, m_iImgWidth, m_iImgHeight, m_iPaddedWidth, m_iPaddedHeight, m_blocks_2D, m_threads2D); get_A_b(m_d_A, m_d_b, m_d_img_Padded, m_d_cosPhi, m_d_sinPhi, m_iImgWidth, m_iImgHeight, m_iPaddedWidth, m_iPaddedHeight, m_blocks_2Dshrunk, m_threads2D); hipEventRecord(m_d_event_2); /* 3. Solve Ax = b and construct the m_h_deltaPhiWFT for, each pixel a thread */ hipLaunchKernelGGL(( Gaussian_Elimination_3x3_kernel), dim3(256), dim3(256), 0, 0, m_d_A, m_d_b, iSize); getLastCudaError("Gaussian_Elimination_3x3_kernel launch failed!"); hipEventRecord(m_d_event_3); hipLaunchKernelGGL(( Update_Delta_Phi_Kernel), dim3(256), dim3(256), 0, 0, m_d_b, iSize, m_d_deltaPhi_WFT); getLastCudaError("Update_Delta_Phi_Kernel launch failed!"); hipEventRecord(m_d_event_4); /* 4. Run the CUDA based WFF */ double d_wft_time = 0; m_d_WFT(m_d_deltaPhi_WFT, m_d_z, d_wft_time); hipEventRecord(m_d_event_5); /* 5. Get the delta phi and current phi */ get_deltaPhi_currPhi(m_d_deltaPhi, m_d_PhiCurr, m_d_deltaPhiRef, m_d_PhiRef, m_d_z.m_d_filtered, iSize); hipEventRecord(m_d_event_6); /* 6. Copy the delta Phi to host */ checkCudaErrors(hipMemcpyAsync(m_h_deltaPhi, m_d_deltaPhi, sizeof(float)*iSize, hipMemcpyDeviceToHost)); hipEventRecord(m_d_event_7); hipEventSynchronize(m_d_event_7); /* END Per-frame algorithm starts here */ /* I/O */ memcpy(dPhi.data(), m_h_deltaPhi, sizeof(float)*iSize); float f_1_time = 0; hipEventElapsedTime(&f_1_time, m_d_event_start, m_d_event_1); float f_2_time = 0; hipEventElapsedTime(&f_2_time, m_d_event_1, m_d_event_2); float f_3_time = 0; hipEventElapsedTime(&f_3_time, m_d_event_2, m_d_event_3); float f_4_time = 0; hipEventElapsedTime(&f_4_time, m_d_event_3, m_d_event_4); float f_5_time = 0; hipEventElapsedTime(&f_5_time, m_d_event_5, m_d_event_6); float f_6_time = 0; hipEventElapsedTime(&f_6_time, m_d_event_6, m_d_event_7); std::cout << "Step 0 running time is: " << f_1_time << "ms" << std::endl; std::cout << "Step 1 running time is: " << f_2_time << "ms" << std::endl; std::cout << "Step 2 running time is: " << f_3_time << "ms" << std::endl; std::cout << "Step 3 running time is: " << f_4_time << "ms" << std::endl; std::cout << "Step 4 running time is: " << d_wft_time << "ms" << std::endl; std::cout << "Step 5 running time is: " << f_5_time << "ms" << std::endl; std::cout << "Step 6 running time is: " << f_6_time << "ms" << std::endl; time = double(f_1_time + f_2_time + f_3_time + f_4_time + f_5_time + f_6_time) + d_wft_time; } void DPRA_CUDAF::update_ref_phi() { checkCudaErrors(hipMemcpyAsync(m_d_PhiRef, m_d_PhiCurr, sizeof(float)*m_iImgWidth*m_iImgHeight, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpyAsync(m_d_deltaPhiRef, m_d_deltaPhi, sizeof(float)*m_iImgWidth*m_iImgHeight, hipMemcpyDeviceToDevice)); } } // namespace DPRA
bda04a884e1a7d587789b58aaae80de1c4d0abca.cu
#include "dpra_cudaf.h" #include "dpra_general.cuh" #include <fstream> // TODO namespace DPRA{ /*---------------------------------------CUDA Kernels----------------------------------*/ __global__ void Gaussian_Elimination_3x3_kernel(const float *in_A, float *out_b, int iSize) { float A[3][4]; // The augmented matrix for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < iSize; i += blockDim.x * gridDim.x) { int idA = i * 9; // Index in Mat A int idb = i * 3; // Index in Vec b // Load values from A&b to the augmented matrix A per thread A[0][0] = 9; A[0][1] = in_A[idA + 3]; A[0][2] = in_A[idA + 6]; A[0][3] = out_b[idb + 0]; A[1][0] = A[0][1]; A[1][1] = in_A[idA + 4]; A[1][2] = in_A[idA + 7]; A[1][3] = out_b[idb + 1]; A[2][0] = A[0][2]; A[2][1] = A[1][2]; A[2][2] = in_A[idA + 8]; A[2][3] = out_b[idb + 2]; // Gaussian Elimination with partial pivoting algorithm for (int k = 0; k < 3; k++) { // 1. Find the i-th pivot of the following A[k][i] elements int i_max = -1; float i_pivot = 0.0f; for (int i = k; i < 3; i++) { if (fabsf(i_pivot) - fabsf(A[i][k]) <= 1e-6) { i_pivot = A[i][k]; i_max = i; } } // 2. swap rows for (int j = 0; j < 4; j++) { float temp = A[i_max][j]; A[i_max][j] = A[k][j]; A[k][j] = temp; } // 3. Triangulate the matrix for (int i = k + 1; i < 3; i++) { float mult = A[i][k] / A[k][k]; for (int j = 0; j < 4; j++) { A[i][j] = A[i][j] - A[k][j] * mult; } } } // 4. Find the solution using backward substitution method A[2][3] = A[2][3] / A[2][2]; A[1][3] = (A[1][3] - A[2][3] * A[1][2]) / A[1][1]; A[0][3] = (A[0][3] - A[2][3] * A[0][2] - A[1][3] * A[0][1]) / A[0][0]; // 5. Wirte the results back to out_b out_b[idb + 0] = A[0][3]; out_b[idb + 1] = A[1][3]; out_b[idb + 2] = A[2][3]; } } __global__ void Update_Delta_Phi_Kernel(const float *in_b, const int iSize, cufftComplex *out_deltaPhiWFT) { for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < iSize; i += gridDim.x * blockDim.x) { int idb = i * 3; float fDeltaPhi = atan2f(-in_b[idb + 2], in_b[idb + 1]); out_deltaPhiWFT[i].x = cosf(fDeltaPhi); out_deltaPhiWFT[i].y = sinf(fDeltaPhi); } } /*--------------------------------------End CUDA Kernels--------------------------------*/ DPRA_CUDAF::DPRA_CUDAF(const float *v_Phi0, const int iWidth, const int iHeight, const int irefUpdateRate) : m_iImgWidth(iWidth) , m_iImgHeight(iHeight) , m_iPaddedHeight(iHeight + 2) , m_iPaddedWidth(iWidth + 2) , m_rr(irefUpdateRate) , m_h_deltaPhi(nullptr) , m_d_PhiRef(nullptr) , m_d_PhiCurr(nullptr) , m_d_deltaPhiRef(nullptr) , m_d_deltaPhi(nullptr) , m_d_A(nullptr) , m_d_b(nullptr) , m_d_cosPhi(nullptr) , m_d_sinPhi(nullptr) , m_h_img(nullptr) , m_d_img(nullptr) , m_d_img_Padded(nullptr) , m_d_WFT(iWidth, iHeight, WFT_FPA::WFT::WFT_TYPE::WFF, 20, -0.15f, 0.15f, 0.05f, 20, -0.15f, 0.15f, 0.05f, 15, m_d_z, 1) , m_d_deltaPhi_WFT(nullptr) , m_threads2D(BLOCK_SIZE_16, BLOCK_SIZE_16) , m_blocks_2Dshrunk((int)ceil((float)m_iPaddedWidth / (BLOCK_SIZE_16 - 2)), (int)ceil((float)m_iPaddedHeight / (BLOCK_SIZE_16 - 2))) , m_blocks_2D((m_iPaddedWidth + BLOCK_SIZE_16 - 1) / BLOCK_SIZE_16, (m_iPaddedHeight + BLOCK_SIZE_16 - 1) / BLOCK_SIZE_16) { int iImgSize = m_iImgWidth * m_iImgHeight; int iPaddedSize = m_iPaddedHeight * m_iPaddedWidth; // Allocate host pinned memory WFT_FPA::Utils::cucreateptr(m_h_img, iImgSize); WFT_FPA::Utils::cucreateptr(m_h_deltaPhi, iImgSize); // Copy the d_Phi0 to local device array checkCudaErrors(cudaMalloc((void**)&m_d_PhiRef, sizeof(float)*iImgSize)); // Allocate memory checkCudaErrors(cudaMalloc((void**)&m_d_cosPhi, 
sizeof(float)*iPaddedSize)); checkCudaErrors(cudaMalloc((void**)&m_d_sinPhi, sizeof(float)*iPaddedSize)); checkCudaErrors(cudaMalloc((void**)&m_d_img_Padded, sizeof(uchar)*iPaddedSize)); checkCudaErrors(cudaMalloc((void**)&m_d_A, sizeof(float) * 9 * iImgSize)); checkCudaErrors(cudaMalloc((void**)&m_d_b, sizeof(float) * 3 * iImgSize)); checkCudaErrors(cudaMalloc((void**)&m_d_img, sizeof(uchar)*iImgSize)); checkCudaErrors(cudaMalloc((void**)&m_d_PhiCurr, sizeof(float)*iImgSize)); checkCudaErrors(cudaMalloc((void**)&m_d_deltaPhi_WFT, sizeof(cufftComplex) * iImgSize)); checkCudaErrors(cudaMalloc((void**)&m_d_deltaPhi, sizeof(float)*iImgSize)); // Copy the initial v_Phi0 to local device array checkCudaErrors(cudaMalloc((void**)&m_d_PhiRef, sizeof(float)*iImgSize)); checkCudaErrors(cudaMemcpy(m_d_PhiRef, v_Phi0, sizeof(float)*iImgSize, cudaMemcpyHostToDevice)); // Initialize the reference delta phi to 0's checkCudaErrors(cudaMalloc((void**)&m_d_deltaPhiRef, sizeof(float)*iImgSize)); WFT_FPA::Utils::cuInitialize<float>(m_d_deltaPhiRef, 0, iImgSize); // Create CUDA event used for timing and synchronizing checkCudaErrors(cudaEventCreate(&m_d_event_start)); checkCudaErrors(cudaEventCreate(&m_d_event_1)); checkCudaErrors(cudaEventCreate(&m_d_event_2)); checkCudaErrors(cudaEventCreate(&m_d_event_3)); checkCudaErrors(cudaEventCreate(&m_d_event_4)); checkCudaErrors(cudaEventCreate(&m_d_event_5)); checkCudaErrors(cudaEventCreate(&m_d_event_6)); checkCudaErrors(cudaEventCreate(&m_d_event_7)); } DPRA_CUDAF::~DPRA_CUDAF() { checkCudaErrors(cudaEventDestroy(m_d_event_start)); checkCudaErrors(cudaEventDestroy(m_d_event_1)); checkCudaErrors(cudaEventDestroy(m_d_event_2)); checkCudaErrors(cudaEventDestroy(m_d_event_3)); checkCudaErrors(cudaEventDestroy(m_d_event_4)); checkCudaErrors(cudaEventDestroy(m_d_event_5)); checkCudaErrors(cudaEventDestroy(m_d_event_6)); checkCudaErrors(cudaEventDestroy(m_d_event_7)); WFT_FPA::Utils::cudaSafeFree(m_d_PhiRef); WFT_FPA::Utils::cudaSafeFree(m_d_PhiCurr); WFT_FPA::Utils::cudaSafeFree(m_d_deltaPhiRef); WFT_FPA::Utils::cudaSafeFree(m_d_A); WFT_FPA::Utils::cudaSafeFree(m_d_b); WFT_FPA::Utils::cudaSafeFree(m_d_deltaPhi); WFT_FPA::Utils::cudaSafeFree(m_d_deltaPhi_WFT); WFT_FPA::Utils::cudaSafeFree(m_d_cosPhi); WFT_FPA::Utils::cudaSafeFree(m_d_sinPhi); WFT_FPA::Utils::cudaSafeFree(m_d_img); WFT_FPA::Utils::cudaSafeFree(m_d_img_Padded); WFT_FPA::Utils::cudestroyptr(m_h_img); WFT_FPA::Utils::cudestroyptr(m_h_deltaPhi); } void DPRA_CUDAF::operator() (const std::vector<cv::cuda::HostMem> &f, std::vector<std::vector<float>> &dPhi_Sum, double &time) { } void DPRA_CUDAF::operator() (const std::vector<std::string> &fileNames, std::vector<std::vector<float>> &dPhi_Sum, double &time) { } void DPRA_CUDAF::dpra_per_frame(const cv::Mat &img, std::vector<float> &dPhi, double &time) { int iSize = m_iImgWidth * m_iImgHeight; int iPaddedSize = m_iPaddedWidth * m_iPaddedHeight; /* I/O */ memcpy(m_h_img, img.data, sizeof(uchar)*iSize); /* Per-frame algorithm starts here */ cudaEventRecord(m_d_event_start); /* 1. Load the image f into device padded memory */ checkCudaErrors(cudaMemcpyAsync(m_d_img, m_h_img, sizeof(uchar)*iSize, cudaMemcpyHostToDevice)); load_img_padding(m_d_img_Padded, m_d_img, m_iImgWidth, m_iImgHeight, m_iPaddedWidth, m_iPaddedHeight, m_blocks_2D, m_threads2D); cudaEventRecord(m_d_event_1); /* 2. 
construct matrix A and vector b on GPU */ compute_cosPhi_sinPhi(m_d_cosPhi, m_d_sinPhi, m_d_PhiRef, m_iImgWidth, m_iImgHeight, m_iPaddedWidth, m_iPaddedHeight, m_blocks_2D, m_threads2D); get_A_b(m_d_A, m_d_b, m_d_img_Padded, m_d_cosPhi, m_d_sinPhi, m_iImgWidth, m_iImgHeight, m_iPaddedWidth, m_iPaddedHeight, m_blocks_2Dshrunk, m_threads2D); cudaEventRecord(m_d_event_2); /* 3. Solve Ax = b and construct the m_h_deltaPhiWFT for, each pixel a thread */ Gaussian_Elimination_3x3_kernel<<<256, 256>>>(m_d_A, m_d_b, iSize); getLastCudaError("Gaussian_Elimination_3x3_kernel launch failed!"); cudaEventRecord(m_d_event_3); Update_Delta_Phi_Kernel<<<256, 256>>>(m_d_b, iSize, m_d_deltaPhi_WFT); getLastCudaError("Update_Delta_Phi_Kernel launch failed!"); cudaEventRecord(m_d_event_4); /* 4. Run the CUDA based WFF */ double d_wft_time = 0; m_d_WFT(m_d_deltaPhi_WFT, m_d_z, d_wft_time); cudaEventRecord(m_d_event_5); /* 5. Get the delta phi and current phi */ get_deltaPhi_currPhi(m_d_deltaPhi, m_d_PhiCurr, m_d_deltaPhiRef, m_d_PhiRef, m_d_z.m_d_filtered, iSize); cudaEventRecord(m_d_event_6); /* 6. Copy the delta Phi to host */ checkCudaErrors(cudaMemcpyAsync(m_h_deltaPhi, m_d_deltaPhi, sizeof(float)*iSize, cudaMemcpyDeviceToHost)); cudaEventRecord(m_d_event_7); cudaEventSynchronize(m_d_event_7); /* END Per-frame algorithm starts here */ /* I/O */ memcpy(dPhi.data(), m_h_deltaPhi, sizeof(float)*iSize); float f_1_time = 0; cudaEventElapsedTime(&f_1_time, m_d_event_start, m_d_event_1); float f_2_time = 0; cudaEventElapsedTime(&f_2_time, m_d_event_1, m_d_event_2); float f_3_time = 0; cudaEventElapsedTime(&f_3_time, m_d_event_2, m_d_event_3); float f_4_time = 0; cudaEventElapsedTime(&f_4_time, m_d_event_3, m_d_event_4); float f_5_time = 0; cudaEventElapsedTime(&f_5_time, m_d_event_5, m_d_event_6); float f_6_time = 0; cudaEventElapsedTime(&f_6_time, m_d_event_6, m_d_event_7); std::cout << "Step 0 running time is: " << f_1_time << "ms" << std::endl; std::cout << "Step 1 running time is: " << f_2_time << "ms" << std::endl; std::cout << "Step 2 running time is: " << f_3_time << "ms" << std::endl; std::cout << "Step 3 running time is: " << f_4_time << "ms" << std::endl; std::cout << "Step 4 running time is: " << d_wft_time << "ms" << std::endl; std::cout << "Step 5 running time is: " << f_5_time << "ms" << std::endl; std::cout << "Step 6 running time is: " << f_6_time << "ms" << std::endl; time = double(f_1_time + f_2_time + f_3_time + f_4_time + f_5_time + f_6_time) + d_wft_time; } void DPRA_CUDAF::update_ref_phi() { checkCudaErrors(cudaMemcpyAsync(m_d_PhiRef, m_d_PhiCurr, sizeof(float)*m_iImgWidth*m_iImgHeight, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpyAsync(m_d_deltaPhiRef, m_d_deltaPhi, sizeof(float)*m_iImgWidth*m_iImgHeight, cudaMemcpyDeviceToDevice)); } } // namespace DPRA
8ba8cbb83cb5045154cd7f0becc95f089ca3e16c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * AES.cpp * * The Advanced Encryption Standard (AES, aka AES) block cipher, * designed by J. Daemen and V. Rijmen. * * @author Paulo S. L. M. Barreto * * This software is hereby placed in the public domain. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <assert.h> #include <string.h> #include <stdlib.h> #ifdef BENCHMARK #include <stdio.h> #include <time.h> #endif #include "AES.h" #include "AES.tab" #define FULL_UNROLL #ifdef _MSC_VER #define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00) #define GETWORD(p) SWAP(*((uint *)(p))) #define PUTWORD(ct, st) (*((uint *)(ct)) = SWAP((st))) #else #define GETWORD(pt) (((uint)(pt)[0] << 24) ^ ((uint)(pt)[1] << 16) ^ ((uint)(pt)[2] << 8) ^ ((uint)(pt)[3])) #define PUTWORD(ct, st) ((ct)[0] = (byte)((st) >> 24), (ct)[1] = (byte)((st) >> 16), (ct)[2] = (byte)((st) >> 8), (ct)[3] = (byte)(st), (st)) #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// AES::AES() { hipMalloc((void**)&ce_sched, sizeof(e_sched)); hipMalloc((void**)&cd_sched, sizeof(d_sched)); } AES::~AES() { Nr = 0; memset(e_sched, 0, sizeof(e_sched)); memset(d_sched, 0, sizeof(d_sched)); hipFree(ce_sched); hipFree(cd_sched); } ////////////////////////////////////////////////////////////////////// // Support methods ////////////////////////////////////////////////////////////////////// void AES::ExpandKey(const byte *cipherKey, uint keyBits) { uint *rek = e_sched; uint i = 0; uint temp; rek[0] = GETWORD(cipherKey ); rek[1] = GETWORD(cipherKey + 4); rek[2] = GETWORD(cipherKey + 8); rek[3] = GETWORD(cipherKey + 12); if (keyBits == 128) { for (;;) { temp = rek[3]; rek[4] = rek[0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon[i]; rek[5] = rek[1] ^ rek[4]; rek[6] = rek[2] ^ rek[5]; rek[7] = rek[3] ^ rek[6]; if (++i == 10) { Nr = 10; return; } rek += 4; } } rek[4] = GETWORD(cipherKey + 16); rek[5] = GETWORD(cipherKey + 20); if (keyBits == 192) { for (;;) { temp = rek[ 5]; rek[ 6] = rek[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon[i]; rek[ 7] = rek[ 1] ^ rek[ 6]; rek[ 8] = rek[ 2] ^ rek[ 7]; rek[ 9] = rek[ 3] ^ rek[ 8]; if (++i == 8) { Nr = 12; return; } rek[10] = rek[ 4] ^ rek[ 9]; rek[11] = rek[ 5] ^ rek[10]; rek += 6; } } rek[6] = GETWORD(cipherKey + 24); rek[7] = GETWORD(cipherKey + 28); if (keyBits == 256) { for (;;) { temp = rek[ 7]; rek[ 8] = rek[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) 
^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon[i]; rek[ 9] = rek[ 1] ^ rek[ 8]; rek[10] = rek[ 2] ^ rek[ 9]; rek[11] = rek[ 3] ^ rek[10]; if (++i == 7) { Nr = 14; return; } temp = rek[11]; rek[12] = rek[ 4] ^ (Te4[(temp >> 24) ] & 0xff000000) ^ (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ (Te4[(temp ) & 0xff] & 0x000000ff); rek[13] = rek[ 5] ^ rek[12]; rek[14] = rek[ 6] ^ rek[13]; rek[15] = rek[ 7] ^ rek[14]; rek += 8; } } Nr = 0; // this should never happen } void AES::InvertKey() { uint *rek = e_sched; uint *rdk = d_sched; assert(Nr == 10 || Nr == 12 || Nr == 14); rek += 4*Nr; /* apply the inverse MixColumn transform to all round keys but the first and the last: */ memcpy(rdk, rek, 16); rdk += 4; rek -= 4; for (uint r = 1; r < Nr; r++) { rdk[0] = Td0[Te4[(rek[0] >> 24) ] & 0xff] ^ Td1[Te4[(rek[0] >> 16) & 0xff] & 0xff] ^ Td2[Te4[(rek[0] >> 8) & 0xff] & 0xff] ^ Td3[Te4[(rek[0] ) & 0xff] & 0xff]; rdk[1] = Td0[Te4[(rek[1] >> 24) ] & 0xff] ^ Td1[Te4[(rek[1] >> 16) & 0xff] & 0xff] ^ Td2[Te4[(rek[1] >> 8) & 0xff] & 0xff] ^ Td3[Te4[(rek[1] ) & 0xff] & 0xff]; rdk[2] = Td0[Te4[(rek[2] >> 24) ] & 0xff] ^ Td1[Te4[(rek[2] >> 16) & 0xff] & 0xff] ^ Td2[Te4[(rek[2] >> 8) & 0xff] & 0xff] ^ Td3[Te4[(rek[2] ) & 0xff] & 0xff]; rdk[3] = Td0[Te4[(rek[3] >> 24) ] & 0xff] ^ Td1[Te4[(rek[3] >> 16) & 0xff] & 0xff] ^ Td2[Te4[(rek[3] >> 8) & 0xff] & 0xff] ^ Td3[Te4[(rek[3] ) & 0xff] & 0xff]; rdk += 4; rek -= 4; } memcpy(rdk, rek, 16); } ////////////////////////////////////////////////////////////////////// // Public Interface ////////////////////////////////////////////////////////////////////// /** * Convert one data block from byte[] to int[] representation. */ void AES::byte2int(const byte *b, uint *i) { i[0] = GETWORD(b ); i[1] = GETWORD(b + 4); i[2] = GETWORD(b + 8); i[3] = GETWORD(b + 12); } /** * Convert one data block from int[] to byte[] representation. */ void AES::int2byte(const uint *i, byte *b) { PUTWORD(b , i[0]); PUTWORD(b + 4, i[1]); PUTWORD(b + 8, i[2]); PUTWORD(b + 12, i[3]); } void AES::makeKey(const byte *cipherKey, uint keySize, uint dir) { switch (keySize) { case 16: case 24: case 32: keySize <<= 3; // key size is now in bits break; case 128: case 192: case 256: break; default: throw "Invalid AES key size"; } // assert(dir >= DIR_NONE && dir <= DIR_BOTH); assert(dir <= DIR_BOTH); if (dir != DIR_NONE) { ExpandKey(cipherKey, keySize); hipMemcpy(ce_sched, e_sched, sizeof(e_sched), hipMemcpyHostToDevice); if (dir & DIR_DECRYPT) { InvertKey(); hipMemcpy(cd_sched, d_sched, sizeof(e_sched), hipMemcpyHostToDevice); } } } void AES::encrypt(const uint *pt, uint *ct) { uint *cpt, *cct; uint size = 4*sizeof(uint); hipMalloc((void**)&cpt, size); hipMalloc((void**)&cct, size); hipMemcpy(cpt, pt, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( AES_encrypt), dim3(1),dim3(1), 0, 0, cpt, cct, ce_sched, Nr); hipMemcpy(ct, cct, size, hipMemcpyDeviceToHost); hipFree(cpt); hipFree(cct); } void AES::encrypt_ecb(const uint *pt, uint *ct, uint n = 1) { uint *cpt, *cct; uint size = (n << 2)*sizeof(uint); hipMalloc((void**)&cpt, size); hipMalloc((void**)&cct, size); hipMemcpy(cpt, pt, size, hipMemcpyHostToDevice); struct hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); uint blocks, threads = 1; if(n != 1) { threads = (n < prop.maxThreadsPerBlock*2) ? 
n / 2 : prop.maxThreadsPerBlock; } blocks = n / threads; dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); #ifdef BENCHMARK clock_t start = clock(); hipLaunchKernelGGL(( AES_encrypt), dim3(dimGrid), dim3(dimBlock), 0, 0, cpt, cct, ce_sched, Nr); clock_t end = clock(); printf("Encryption alone takes %d/%d seconds.\n", end-start, CLOCKS_PER_SEC); #else hipLaunchKernelGGL(( AES_encrypt), dim3(dimGrid), dim3(dimBlock), 0, 0, cpt, cct, ce_sched, Nr); #endif #ifndef NO_COPYBACK hipMemcpy(ct, cct, size, hipMemcpyDeviceToHost); hipFree(cpt); hipFree(cct); #endif } #define STREAMS 8 void AES::encrypt_ecb_async(const uint *pt, uint *ct, uint n = 1) { uint *cpt, *cct; uint i, size = (n << 2)*sizeof(uint); uint streamSize = size / STREAMS; uint streamMem = (n << 2) / STREAMS; hipMalloc((void**)&cpt, size); hipMalloc((void**)&cct, size); hipStream_t stream[STREAMS]; struct hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); uint threads = 1; if(n != 1) { threads = (n < prop.maxThreadsPerBlock*2) ? n / 2 : prop.maxThreadsPerBlock; } uint blocks = (n/STREAMS) / threads; dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); for(i = 0; i < STREAMS; i++) { hipStreamCreate(&stream[i]); } for(i = 0; i < STREAMS; i++) { uint offset = i*streamMem; hipError_t r = hipMemcpyAsync(cpt + offset, pt + offset, streamSize, hipMemcpyHostToDevice, stream[i]); } for(i = 0; i < STREAMS; i++) { uint offset = i*streamMem; hipLaunchKernelGGL(( AES_encrypt), dim3(dimGrid), dim3(dimBlock), 0, stream[i], cpt + offset, cct + offset, ce_sched, Nr); } for(i = 0; i < STREAMS; i++) { uint offset = i*streamMem; hipError_t r = hipMemcpyAsync(ct + offset, cct + offset, streamSize, hipMemcpyDeviceToHost, stream[i]); } hipDeviceSynchronize(); hipFree(cpt); hipFree(cct); } void AES::decrypt(const uint *ct, uint *pt) { } __global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int i = x + y * gridDim.x * blockDim.x; int offset = i << 2; uint s0, s1, s2, s3, t0, t1, t2, t3; /* * map byte array block to cipher state * and add initial round key: */ s0 = pt[offset + 0] ^ rek[0]; s1 = pt[offset + 1] ^ rek[1]; s2 = pt[offset + 2] ^ rek[2]; s3 = pt[offset + 3] ^ rek[3]; /* round 1: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[ 4]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[ 5]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[ 6]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[ 7]; /* round 2: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[ 8]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[ 9]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[10]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[11]; /* round 3: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[12]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[13]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[14]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[15]; /* round 4: */ s0 = cTe0[t0 >> 
24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[16]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[17]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[18]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[19]; /* round 5: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[20]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[21]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[22]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[23]; /* round 6: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[24]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[25]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[26]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[27]; /* round 7: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[28]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[29]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[30]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[31]; /* round 8: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[32]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[33]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[34]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[35]; /* round 9: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[36]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[37]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[38]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[39]; if (Nr > 10) { /* round 10: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[40]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[41]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[42]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[43]; /* round 11: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[44]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[45]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[46]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[47]; if (Nr > 12) { /* round 12: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[48]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[49]; s2 = cTe0[t2 >> 
24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[50]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[51]; /* round 13: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[52]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[53]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[54]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[55]; } } rek += Nr << 2; /* * apply last round and * map cipher state to byte array block: */ ct[offset + 0] = (cTe4[(t0 >> 24) ] & 0xff000000) ^ (cTe4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (cTe4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (cTe4[(t3 ) & 0xff] & 0x000000ff) ^ rek[0]; ct[offset + 1] = (cTe4[(t1 >> 24) ] & 0xff000000) ^ (cTe4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ (cTe4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (cTe4[(t0 ) & 0xff] & 0x000000ff) ^ rek[1]; ct[offset + 2] = (cTe4[(t2 >> 24) ] & 0xff000000) ^ (cTe4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ (cTe4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (cTe4[(t1 ) & 0xff] & 0x000000ff) ^ rek[2]; ct[offset + 3] = (cTe4[(t3 >> 24) ] & 0xff000000) ^ (cTe4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ (cTe4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (cTe4[(t2 ) & 0xff] & 0x000000ff) ^ rek[3]; } __global__ void AES_decrypt(const uint *ct, uint *pt, uint *rdk, uint Nr) { uint s0, s1, s2, s3, t0, t1, t2, t3; /* * map byte array block to cipher state * and add initial round key: */ s0 = ct[0] ^ rdk[0]; s1 = ct[1] ^ rdk[1]; s2 = ct[2] ^ rdk[2]; s3 = ct[3] ^ rdk[3]; /* round 1: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[ 4]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[ 5]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[ 6]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[ 7]; /* round 2: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[ 8]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[ 9]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[10]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[11]; /* round 3: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[12]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[13]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[14]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[15]; /* round 4: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[16]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[17]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[18]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[19]; /* round 5: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[20]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[21]; t2 = 
cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[22]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[23]; /* round 6: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[24]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[25]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[26]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[27]; /* round 7: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[28]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[29]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[30]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[31]; /* round 8: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[32]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[33]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[34]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[35]; /* round 9: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[36]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[37]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[38]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[39]; if (Nr > 10) { /* round 10: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[40]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[41]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[42]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[43]; /* round 11: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[44]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[45]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[46]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[47]; if (Nr > 12) { /* round 12: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[48]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[49]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[50]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[51]; /* round 13: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[52]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[53]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[54]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[55]; } } 
rdk += Nr << 2; /* * apply last round and * map cipher state to byte array block: */ pt[0] = (cTd4[(t0 >> 24) ] & 0xff000000) ^ (cTd4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t1 ) & 0xff] & 0x000000ff) ^ rdk[0]; pt[1] = (cTd4[(t1 >> 24) ] & 0xff000000) ^ (cTd4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t2 ) & 0xff] & 0x000000ff) ^ rdk[1]; pt[2] = (cTd4[(t2 >> 24) ] & 0xff000000) ^ (cTd4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t3 ) & 0xff] & 0x000000ff) ^ rdk[2]; pt[3] = (cTd4[(t3 >> 24) ] & 0xff000000) ^ (cTd4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t0 ) & 0xff] & 0x000000ff) ^ rdk[3]; }
8ba8cbb83cb5045154cd7f0becc95f089ca3e16c.cu
/** * AES.cpp * * The Advanced Encryption Standard (AES, aka AES) block cipher, * designed by J. Daemen and V. Rijmen. * * @author Paulo S. L. M. Barreto * * This software is hereby placed in the public domain. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <assert.h> #include <string.h> #include <stdlib.h> #ifdef BENCHMARK #include <stdio.h> #include <time.h> #endif #include "AES.h" #include "AES.tab" #define FULL_UNROLL #ifdef _MSC_VER #define SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00) #define GETWORD(p) SWAP(*((uint *)(p))) #define PUTWORD(ct, st) (*((uint *)(ct)) = SWAP((st))) #else #define GETWORD(pt) (((uint)(pt)[0] << 24) ^ ((uint)(pt)[1] << 16) ^ ((uint)(pt)[2] << 8) ^ ((uint)(pt)[3])) #define PUTWORD(ct, st) ((ct)[0] = (byte)((st) >> 24), (ct)[1] = (byte)((st) >> 16), (ct)[2] = (byte)((st) >> 8), (ct)[3] = (byte)(st), (st)) #endif ////////////////////////////////////////////////////////////////////// // Construction/Destruction ////////////////////////////////////////////////////////////////////// AES::AES() { cudaMalloc((void**)&ce_sched, sizeof(e_sched)); cudaMalloc((void**)&cd_sched, sizeof(d_sched)); } AES::~AES() { Nr = 0; memset(e_sched, 0, sizeof(e_sched)); memset(d_sched, 0, sizeof(d_sched)); cudaFree(ce_sched); cudaFree(cd_sched); } ////////////////////////////////////////////////////////////////////// // Support methods ////////////////////////////////////////////////////////////////////// void AES::ExpandKey(const byte *cipherKey, uint keyBits) { uint *rek = e_sched; uint i = 0; uint temp; rek[0] = GETWORD(cipherKey ); rek[1] = GETWORD(cipherKey + 4); rek[2] = GETWORD(cipherKey + 8); rek[3] = GETWORD(cipherKey + 12); if (keyBits == 128) { for (;;) { temp = rek[3]; rek[4] = rek[0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon[i]; rek[5] = rek[1] ^ rek[4]; rek[6] = rek[2] ^ rek[5]; rek[7] = rek[3] ^ rek[6]; if (++i == 10) { Nr = 10; return; } rek += 4; } } rek[4] = GETWORD(cipherKey + 16); rek[5] = GETWORD(cipherKey + 20); if (keyBits == 192) { for (;;) { temp = rek[ 5]; rek[ 6] = rek[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon[i]; rek[ 7] = rek[ 1] ^ rek[ 6]; rek[ 8] = rek[ 2] ^ rek[ 7]; rek[ 9] = rek[ 3] ^ rek[ 8]; if (++i == 8) { Nr = 12; return; } rek[10] = rek[ 4] ^ rek[ 9]; rek[11] = rek[ 5] ^ rek[10]; rek += 6; } } rek[6] = GETWORD(cipherKey + 24); rek[7] = GETWORD(cipherKey + 28); if (keyBits == 256) { for (;;) { temp = rek[ 7]; rek[ 8] = rek[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon[i]; 
rek[ 9] = rek[ 1] ^ rek[ 8]; rek[10] = rek[ 2] ^ rek[ 9]; rek[11] = rek[ 3] ^ rek[10]; if (++i == 7) { Nr = 14; return; } temp = rek[11]; rek[12] = rek[ 4] ^ (Te4[(temp >> 24) ] & 0xff000000) ^ (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ (Te4[(temp ) & 0xff] & 0x000000ff); rek[13] = rek[ 5] ^ rek[12]; rek[14] = rek[ 6] ^ rek[13]; rek[15] = rek[ 7] ^ rek[14]; rek += 8; } } Nr = 0; // this should never happen } void AES::InvertKey() { uint *rek = e_sched; uint *rdk = d_sched; assert(Nr == 10 || Nr == 12 || Nr == 14); rek += 4*Nr; /* apply the inverse MixColumn transform to all round keys but the first and the last: */ memcpy(rdk, rek, 16); rdk += 4; rek -= 4; for (uint r = 1; r < Nr; r++) { rdk[0] = Td0[Te4[(rek[0] >> 24) ] & 0xff] ^ Td1[Te4[(rek[0] >> 16) & 0xff] & 0xff] ^ Td2[Te4[(rek[0] >> 8) & 0xff] & 0xff] ^ Td3[Te4[(rek[0] ) & 0xff] & 0xff]; rdk[1] = Td0[Te4[(rek[1] >> 24) ] & 0xff] ^ Td1[Te4[(rek[1] >> 16) & 0xff] & 0xff] ^ Td2[Te4[(rek[1] >> 8) & 0xff] & 0xff] ^ Td3[Te4[(rek[1] ) & 0xff] & 0xff]; rdk[2] = Td0[Te4[(rek[2] >> 24) ] & 0xff] ^ Td1[Te4[(rek[2] >> 16) & 0xff] & 0xff] ^ Td2[Te4[(rek[2] >> 8) & 0xff] & 0xff] ^ Td3[Te4[(rek[2] ) & 0xff] & 0xff]; rdk[3] = Td0[Te4[(rek[3] >> 24) ] & 0xff] ^ Td1[Te4[(rek[3] >> 16) & 0xff] & 0xff] ^ Td2[Te4[(rek[3] >> 8) & 0xff] & 0xff] ^ Td3[Te4[(rek[3] ) & 0xff] & 0xff]; rdk += 4; rek -= 4; } memcpy(rdk, rek, 16); } ////////////////////////////////////////////////////////////////////// // Public Interface ////////////////////////////////////////////////////////////////////// /** * Convert one data block from byte[] to int[] representation. */ void AES::byte2int(const byte *b, uint *i) { i[0] = GETWORD(b ); i[1] = GETWORD(b + 4); i[2] = GETWORD(b + 8); i[3] = GETWORD(b + 12); } /** * Convert one data block from int[] to byte[] representation. */ void AES::int2byte(const uint *i, byte *b) { PUTWORD(b , i[0]); PUTWORD(b + 4, i[1]); PUTWORD(b + 8, i[2]); PUTWORD(b + 12, i[3]); } void AES::makeKey(const byte *cipherKey, uint keySize, uint dir) { switch (keySize) { case 16: case 24: case 32: keySize <<= 3; // key size is now in bits break; case 128: case 192: case 256: break; default: throw "Invalid AES key size"; } // assert(dir >= DIR_NONE && dir <= DIR_BOTH); assert(dir <= DIR_BOTH); if (dir != DIR_NONE) { ExpandKey(cipherKey, keySize); cudaMemcpy(ce_sched, e_sched, sizeof(e_sched), cudaMemcpyHostToDevice); if (dir & DIR_DECRYPT) { InvertKey(); cudaMemcpy(cd_sched, d_sched, sizeof(e_sched), cudaMemcpyHostToDevice); } } } void AES::encrypt(const uint *pt, uint *ct) { uint *cpt, *cct; uint size = 4*sizeof(uint); cudaMalloc((void**)&cpt, size); cudaMalloc((void**)&cct, size); cudaMemcpy(cpt, pt, size, cudaMemcpyHostToDevice); AES_encrypt<<<1,1>>>(cpt, cct, ce_sched, Nr); cudaMemcpy(ct, cct, size, cudaMemcpyDeviceToHost); cudaFree(cpt); cudaFree(cct); } void AES::encrypt_ecb(const uint *pt, uint *ct, uint n = 1) { uint *cpt, *cct; uint size = (n << 2)*sizeof(uint); cudaMalloc((void**)&cpt, size); cudaMalloc((void**)&cct, size); cudaMemcpy(cpt, pt, size, cudaMemcpyHostToDevice); struct cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); uint blocks, threads = 1; if(n != 1) { threads = (n < prop.maxThreadsPerBlock*2) ? 
n / 2 : prop.maxThreadsPerBlock; } blocks = n / threads; dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); #ifdef BENCHMARK clock_t start = clock(); AES_encrypt<<<dimGrid, dimBlock>>>(cpt, cct, ce_sched, Nr); clock_t end = clock(); printf("Encryption alone takes %d/%d seconds.\n", end-start, CLOCKS_PER_SEC); #else AES_encrypt<<<dimGrid, dimBlock>>>(cpt, cct, ce_sched, Nr); #endif #ifndef NO_COPYBACK cudaMemcpy(ct, cct, size, cudaMemcpyDeviceToHost); cudaFree(cpt); cudaFree(cct); #endif } #define STREAMS 8 void AES::encrypt_ecb_async(const uint *pt, uint *ct, uint n = 1) { uint *cpt, *cct; uint i, size = (n << 2)*sizeof(uint); uint streamSize = size / STREAMS; uint streamMem = (n << 2) / STREAMS; cudaMalloc((void**)&cpt, size); cudaMalloc((void**)&cct, size); cudaStream_t stream[STREAMS]; struct cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); uint threads = 1; if(n != 1) { threads = (n < prop.maxThreadsPerBlock*2) ? n / 2 : prop.maxThreadsPerBlock; } uint blocks = (n/STREAMS) / threads; dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); for(i = 0; i < STREAMS; i++) { cudaStreamCreate(&stream[i]); } for(i = 0; i < STREAMS; i++) { uint offset = i*streamMem; cudaError_t r = cudaMemcpyAsync(cpt + offset, pt + offset, streamSize, cudaMemcpyHostToDevice, stream[i]); } for(i = 0; i < STREAMS; i++) { uint offset = i*streamMem; AES_encrypt<<<dimGrid, dimBlock, 0, stream[i]>>>(cpt + offset, cct + offset, ce_sched, Nr); } for(i = 0; i < STREAMS; i++) { uint offset = i*streamMem; cudaError_t r = cudaMemcpyAsync(ct + offset, cct + offset, streamSize, cudaMemcpyDeviceToHost, stream[i]); } cudaThreadSynchronize(); cudaFree(cpt); cudaFree(cct); } void AES::decrypt(const uint *ct, uint *pt) { } __global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int i = x + y * gridDim.x * blockDim.x; int offset = i << 2; uint s0, s1, s2, s3, t0, t1, t2, t3; /* * map byte array block to cipher state * and add initial round key: */ s0 = pt[offset + 0] ^ rek[0]; s1 = pt[offset + 1] ^ rek[1]; s2 = pt[offset + 2] ^ rek[2]; s3 = pt[offset + 3] ^ rek[3]; /* round 1: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[ 4]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[ 5]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[ 6]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[ 7]; /* round 2: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[ 8]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[ 9]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[10]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[11]; /* round 3: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[12]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[13]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[14]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[15]; /* round 4: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[16]; s1 
= cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[17]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[18]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[19]; /* round 5: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[20]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[21]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[22]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[23]; /* round 6: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[24]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[25]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[26]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[27]; /* round 7: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[28]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[29]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[30]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[31]; /* round 8: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[32]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[33]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[34]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[35]; /* round 9: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[36]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[37]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[38]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[39]; if (Nr > 10) { /* round 10: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[40]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[41]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[42]; s3 = cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[43]; /* round 11: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[44]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[45]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[46]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[47]; if (Nr > 12) { /* round 12: */ s0 = cTe0[t0 >> 24] ^ cTe1[(t1 >> 16) & 0xff] ^ cTe2[(t2 >> 8) & 0xff] ^ cTe3[t3 & 0xff] ^ rek[48]; s1 = cTe0[t1 >> 24] ^ cTe1[(t2 >> 16) & 0xff] ^ cTe2[(t3 >> 8) & 0xff] ^ cTe3[t0 & 0xff] ^ rek[49]; s2 = cTe0[t2 >> 24] ^ cTe1[(t3 >> 16) & 0xff] ^ cTe2[(t0 >> 8) & 0xff] ^ cTe3[t1 & 0xff] ^ rek[50]; s3 = 
cTe0[t3 >> 24] ^ cTe1[(t0 >> 16) & 0xff] ^ cTe2[(t1 >> 8) & 0xff] ^ cTe3[t2 & 0xff] ^ rek[51]; /* round 13: */ t0 = cTe0[s0 >> 24] ^ cTe1[(s1 >> 16) & 0xff] ^ cTe2[(s2 >> 8) & 0xff] ^ cTe3[s3 & 0xff] ^ rek[52]; t1 = cTe0[s1 >> 24] ^ cTe1[(s2 >> 16) & 0xff] ^ cTe2[(s3 >> 8) & 0xff] ^ cTe3[s0 & 0xff] ^ rek[53]; t2 = cTe0[s2 >> 24] ^ cTe1[(s3 >> 16) & 0xff] ^ cTe2[(s0 >> 8) & 0xff] ^ cTe3[s1 & 0xff] ^ rek[54]; t3 = cTe0[s3 >> 24] ^ cTe1[(s0 >> 16) & 0xff] ^ cTe2[(s1 >> 8) & 0xff] ^ cTe3[s2 & 0xff] ^ rek[55]; } } rek += Nr << 2; /* * apply last round and * map cipher state to byte array block: */ ct[offset + 0] = (cTe4[(t0 >> 24) ] & 0xff000000) ^ (cTe4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (cTe4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (cTe4[(t3 ) & 0xff] & 0x000000ff) ^ rek[0]; ct[offset + 1] = (cTe4[(t1 >> 24) ] & 0xff000000) ^ (cTe4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ (cTe4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (cTe4[(t0 ) & 0xff] & 0x000000ff) ^ rek[1]; ct[offset + 2] = (cTe4[(t2 >> 24) ] & 0xff000000) ^ (cTe4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ (cTe4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (cTe4[(t1 ) & 0xff] & 0x000000ff) ^ rek[2]; ct[offset + 3] = (cTe4[(t3 >> 24) ] & 0xff000000) ^ (cTe4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ (cTe4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (cTe4[(t2 ) & 0xff] & 0x000000ff) ^ rek[3]; } __global__ void AES_decrypt(const uint *ct, uint *pt, uint *rdk, uint Nr) { uint s0, s1, s2, s3, t0, t1, t2, t3; /* * map byte array block to cipher state * and add initial round key: */ s0 = ct[0] ^ rdk[0]; s1 = ct[1] ^ rdk[1]; s2 = ct[2] ^ rdk[2]; s3 = ct[3] ^ rdk[3]; /* round 1: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[ 4]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[ 5]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[ 6]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[ 7]; /* round 2: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[ 8]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[ 9]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[10]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[11]; /* round 3: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[12]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[13]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[14]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[15]; /* round 4: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[16]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[17]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[18]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[19]; /* round 5: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[20]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[21]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ 
rdk[22]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[23]; /* round 6: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[24]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[25]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[26]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[27]; /* round 7: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[28]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[29]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[30]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[31]; /* round 8: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[32]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[33]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[34]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[35]; /* round 9: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[36]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[37]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[38]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[39]; if (Nr > 10) { /* round 10: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[40]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[41]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[42]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[43]; /* round 11: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[44]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[45]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[46]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[47]; if (Nr > 12) { /* round 12: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[48]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[49]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[50]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[51]; /* round 13: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[52]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[53]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[54]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[55]; } } rdk += Nr << 2; /* * apply last round and * map cipher state to byte array block: */ 
pt[0] = (cTd4[(t0 >> 24) ] & 0xff000000) ^ (cTd4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t1 ) & 0xff] & 0x000000ff) ^ rdk[0]; pt[1] = (cTd4[(t1 >> 24) ] & 0xff000000) ^ (cTd4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t2 ) & 0xff] & 0x000000ff) ^ rdk[1]; pt[2] = (cTd4[(t2 >> 24) ] & 0xff000000) ^ (cTd4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t3 ) & 0xff] & 0x000000ff) ^ rdk[2]; pt[3] = (cTd4[(t3 >> 24) ] & 0xff000000) ^ (cTd4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t0 ) & 0xff] & 0x000000ff) ^ rdk[3]; }
63ba311dc62f6987977d196a9874f36b6ec31990.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <math.h> #include <hdf5.h> typedef struct N3 { int x, y, z; } N3; typedef struct P3F3 { float ***x, ***y, ***z; } P3F3; typedef struct P1F3 { float *x, *y, *z; } P1F3; __host__ void updateTimer(time_t t0, int tstep, char str[]) { int elapsedTime=(int)(time(0)-t0); sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60); } __host__ void exec(char *format, ...) { char str[1024]; va_list ap; va_start(ap, format); vsprintf(str, format, ap); system(str); } __host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) { char filename[1024]; va_list ap; va_start(ap, format); vsprintf(filename, format, ap); hid_t file, dataset, filespace, memspace; hsize_t dimsm[3] = { Ni, Nj, Nk }; hsize_t start[3] = { is, js, ks }; hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke }; memspace = H5Screate_simple(3, dimsm, 0); filespace = H5Screate_simple(3, count, 0); file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0); H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]); H5Dclose(dataset); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(file); } __host__ void print_array(N3 N, float ***a) { int j,k; for (j=0; j<N.y; j++) { for (k=0; k<N.z; k++) { printf("%1.4f\t", a[N.x/2][j][k]); } printf("\n"); } printf("\n"); } __host__ float ***makeArray(N3 N) { float ***f; f = (float ***) calloc (N.x, sizeof(float **)); f[0] = (float **) calloc (N.y*N.x, sizeof(float *)); f[0][0] = (float *) calloc (N.z*N.y*N.x, sizeof(float)); for (int i=0; i<N.x; i++) f[i] = f[0] + i*N.y; for (int i=0; i<N.y*N.x; i++) f[0][i] = f[0][0] + i*N.z; return f; } __host__ void set_geometry(N3 N, P3F3 CE) { int i,j,k; for (i=0; i<N.x; i++) { for (j=0; j<N.y; j++) { for (k=0; k<N.z; k++) { CE.x[i][j][k] = 0.5; CE.y[i][j][k] = 0.5; CE.z[i][j][k] = 0.5; } } } } __global__ void initArrays(N3 N, int Nzpit, P1F3 E, P1F3 H) { int idx; idx = blockIdx.x*blockDim.x + threadIdx.x; //printf("gridDim.x=%d\n",gridDim.x); //printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x); if ( idx < N.x*N.y*Nzpit ) { E.x[idx] = 0; E.y[idx] = 0; E.z[idx] = 0; H.x[idx] = 0; H.y[idx] = 0; H.z[idx] = 0; } } __global__ void updateE(N3 N, int Nzpit, int TPB, P1F3 E, P1F3 H, P1F3 CE) { int tk, idx; tk = threadIdx.x; idx = blockIdx.x*TPB + tk; int i,j,k; int Nyz = N.y*Nzpit; i = idx/Nyz; j = ( idx - i*Nyz )/Nzpit; k = idx - i*Nyz - j*Nzpit; //printf("[%.2d]\t\t[%.2d]\t\t[%.2d,%.2d,%.2d]\t\t[%.2d]\n", blockIdx.x, tk, i, j, k, idx); extern __shared__ float hs[]; float* hx = (float*) hs; float* hy = (float*) &hx[TPB+1]; float* hz = (float*) &hy[TPB+1]; if ( i<N.x && k<N.z) { hx[tk] = H.x[idx]; hy[tk] = H.y[idx]; hz[tk] = H.z[idx]; if ( tk==TPB-1 && k<N.z-1 ) { hx[tk+1] = H.x[idx+1]; hy[tk+1] = H.y[idx+1]; } } __syncthreads(); if ( i<N.x && k<N.z) { if ( j<N.y-1 && k<N.z-1 ) E.x[idx] += CE.x[idx]*( H.z[idx+Nzpit] - hz[tk] - hy[tk+1] + hy[tk] ); if ( i<N.x-1 && k<N.z-1 ) E.y[idx] += CE.y[idx]*( hx[tk+1] - hx[tk] - H.z[idx+Nyz] + hz[tk] ); if ( i<N.x-1 && j<N.y-1 ) E.z[idx] += CE.z[idx]*( H.y[idx+Nyz] - hy[tk] - H.x[idx+Nzpit] + hx[tk] ); } } __global__ void updateSrc(N3 N, int Nzpit, P1F3 E, int tstep) { int idx, ijk; 
idx = blockIdx.x*blockDim.x + threadIdx.x; ijk = idx*(N.y)*(Nzpit) + (N.y/2)*(Nzpit) + (N.z/2); //printf("idx=%d, ijk=%d\n", idx, ijk); //Ex[ijk] += __sinf(0.1*tstep); if ( idx < N.x ) { E.x[ijk] += sin(0.1*tstep); } } __global__ void updateH(N3 N, int Nzpit, int TPB, P1F3 E, P1F3 H) { int tk, idx; tk = threadIdx.x; idx = blockIdx.x*TPB + tk; int i,j,k; int Nyz = N.y*Nzpit; i = idx/Nyz; j = ( idx - i*Nyz )/Nzpit; k = idx - i*Nyz - j*Nzpit; extern __shared__ float es[]; float* ex = (float*) es; float* ey = (float*) &ex[TPB+1]; float* ez = (float*) &ey[TPB+1]; if ( i<N.x && k<N.z) { ex[tk+1] = E.x[idx]; ey[tk+1] = E.y[idx]; ez[tk] = E.z[idx]; if ( tk==0 && k>0 ) { ex[0] = E.x[idx-1]; ey[0] = E.y[idx-1]; } } __syncthreads(); if ( i<N.x && k<N.z) { if ( j>0 && k>0 ) H.x[idx] -= 0.5*( ez[tk] - E.z[idx-Nzpit] - ey[tk+1] + ey[tk] ); if ( i>0 && k>0 ) H.y[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + E.z[idx-Nyz] ); if ( i>0 && j>0 ) H.z[idx] -= 0.5*( ey[tk+1] - E.y[idx-Nyz] - ex[tk+1] + E.x[idx-Nzpit] ); } } int main() { int tstep; char time_str[32]; time_t t0; // Set the parameters N3 N; N.x = 100; N.y = 100; N.z = 500; //N.y = 16; //N.z = 20; int TMAX = 10000; printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX); // Allocate host memory P3F3 CE; CE.x = makeArray(N); CE.y = makeArray(N); CE.z = makeArray(N); float ***Ex; Ex = makeArray(N); // Geometry set_geometry(N, CE); // Allocate device memory P1F3 devE; P1F3 devH; P1F3 devCE; int z_size = N.z*sizeof(float); size_t pitch; hipMallocPitch ( (void**) &devE.x, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devE.y, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devE.z, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devH.x, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devH.y, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devH.z, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devCE.x, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devCE.y, &pitch, z_size, N.x*N.y ); hipMallocPitch ( (void**) &devCE.z, &pitch, z_size, N.x*N.y ); // Copy arrays from host to device hipMemcpy2D ( devCE.x, pitch, CE.x[0][0], z_size, z_size, N.x*N.y, hipMemcpyHostToDevice ); hipMemcpy2D ( devCE.y, pitch, CE.y[0][0], z_size, z_size, N.x*N.y, hipMemcpyHostToDevice ); hipMemcpy2D ( devCE.z, pitch, CE.z[0][0], z_size, z_size, N.x*N.y, hipMemcpyHostToDevice ); int Nz_pitch = pitch/4; printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch); // Set the GPU parameters int Ntot = N.x*N.y*Nz_pitch; int TPB = 512; // Number of threads per block int BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; // Number of thread blocks per grid dim3 Dg = dim3(BPG); dim3 Db = dim3(TPB); size_t Ns = sizeof(float)*( (TPB+1)+(TPB+1)+(TPB) ); printf("Threads per block: %d\n", TPB); if ( TPB > 512 ) { printf("Error: An excessive number of threads per block.\n"); exit(0); } printf("Blocks per grid: %d\n", BPG); if ( BPG > 65535 ) { printf("Error: An excessive number of blocks per grid.\n"); exit(0); } printf("Number of bytes in shared memory: %d\n", Ns); int TPBsrc = N.x; int BPGsrc = 1; dim3 Dgsrc(BPGsrc); dim3 Dbsrc(TPBsrc); int TPBinit = Nz_pitch; int BPGinit = Ntot%TPBinit == 0 ? 
Ntot/TPBinit : Ntot/TPBinit + 1; dim3 Dginit(BPGinit); dim3 Dbinit(TPBinit); // Initialize the device arrays hipLaunchKernelGGL(( initArrays) , dim3(Dginit),dim3(Dbinit), 0, 0, N, Nz_pitch, devE, devH ); // Main time loop t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { //for ( tstep=1; tstep<=300; tstep++) { // Update on the GPU hipLaunchKernelGGL(( updateE) , dim3(Dg),dim3(Db),Ns, 0, N, Nz_pitch, TPB, devE, devH, devCE ); hipLaunchKernelGGL(( updateSrc) , dim3(Dgsrc),dim3(Dbsrc), 0, 0, N, Nz_pitch, devE, tstep ); hipLaunchKernelGGL(( updateH) , dim3(Dg),dim3(Db),Ns, 0, N, Nz_pitch, TPB, devE, devH ); //if ( tstep/100*100 == tstep ) { // Copy arrays from device to host //hipMemcpy2D( Ex[0][0], z_size, devE.x, pitch, z_size, N.x*N.y, hipMemcpyDeviceToHost ); //print_array(N, Ex); //dumpToH5(N.x, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep); //exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); //updateTimer(t0, tstep, time_str); //printf("tstep=%d\t%s\n", tstep, time_str); //} } updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); }
63ba311dc62f6987977d196a9874f36b6ec31990.cu
#include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <math.h> #include <hdf5.h> typedef struct N3 { int x, y, z; } N3; typedef struct P3F3 { float ***x, ***y, ***z; } P3F3; typedef struct P1F3 { float *x, *y, *z; } P1F3; __host__ void updateTimer(time_t t0, int tstep, char str[]) { int elapsedTime=(int)(time(0)-t0); sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60); } __host__ void exec(char *format, ...) { char str[1024]; va_list ap; va_start(ap, format); vsprintf(str, format, ap); system(str); } __host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) { char filename[1024]; va_list ap; va_start(ap, format); vsprintf(filename, format, ap); hid_t file, dataset, filespace, memspace; hsize_t dimsm[3] = { Ni, Nj, Nk }; hsize_t start[3] = { is, js, ks }; hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke }; memspace = H5Screate_simple(3, dimsm, 0); filespace = H5Screate_simple(3, count, 0); file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0); H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]); H5Dclose(dataset); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(file); } __host__ void print_array(N3 N, float ***a) { int j,k; for (j=0; j<N.y; j++) { for (k=0; k<N.z; k++) { printf("%1.4f\t", a[N.x/2][j][k]); } printf("\n"); } printf("\n"); } __host__ float ***makeArray(N3 N) { float ***f; f = (float ***) calloc (N.x, sizeof(float **)); f[0] = (float **) calloc (N.y*N.x, sizeof(float *)); f[0][0] = (float *) calloc (N.z*N.y*N.x, sizeof(float)); for (int i=0; i<N.x; i++) f[i] = f[0] + i*N.y; for (int i=0; i<N.y*N.x; i++) f[0][i] = f[0][0] + i*N.z; return f; } __host__ void set_geometry(N3 N, P3F3 CE) { int i,j,k; for (i=0; i<N.x; i++) { for (j=0; j<N.y; j++) { for (k=0; k<N.z; k++) { CE.x[i][j][k] = 0.5; CE.y[i][j][k] = 0.5; CE.z[i][j][k] = 0.5; } } } } __global__ void initArrays(N3 N, int Nzpit, P1F3 E, P1F3 H) { int idx; idx = blockIdx.x*blockDim.x + threadIdx.x; //printf("gridDim.x=%d\n",gridDim.x); //printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x); if ( idx < N.x*N.y*Nzpit ) { E.x[idx] = 0; E.y[idx] = 0; E.z[idx] = 0; H.x[idx] = 0; H.y[idx] = 0; H.z[idx] = 0; } } __global__ void updateE(N3 N, int Nzpit, int TPB, P1F3 E, P1F3 H, P1F3 CE) { int tk, idx; tk = threadIdx.x; idx = blockIdx.x*TPB + tk; int i,j,k; int Nyz = N.y*Nzpit; i = idx/Nyz; j = ( idx - i*Nyz )/Nzpit; k = idx - i*Nyz - j*Nzpit; //printf("[%.2d]\t\t[%.2d]\t\t[%.2d,%.2d,%.2d]\t\t[%.2d]\n", blockIdx.x, tk, i, j, k, idx); extern __shared__ float hs[]; float* hx = (float*) hs; float* hy = (float*) &hx[TPB+1]; float* hz = (float*) &hy[TPB+1]; if ( i<N.x && k<N.z) { hx[tk] = H.x[idx]; hy[tk] = H.y[idx]; hz[tk] = H.z[idx]; if ( tk==TPB-1 && k<N.z-1 ) { hx[tk+1] = H.x[idx+1]; hy[tk+1] = H.y[idx+1]; } } __syncthreads(); if ( i<N.x && k<N.z) { if ( j<N.y-1 && k<N.z-1 ) E.x[idx] += CE.x[idx]*( H.z[idx+Nzpit] - hz[tk] - hy[tk+1] + hy[tk] ); if ( i<N.x-1 && k<N.z-1 ) E.y[idx] += CE.y[idx]*( hx[tk+1] - hx[tk] - H.z[idx+Nyz] + hz[tk] ); if ( i<N.x-1 && j<N.y-1 ) E.z[idx] += CE.z[idx]*( H.y[idx+Nyz] - hy[tk] - H.x[idx+Nzpit] + hx[tk] ); } } __global__ void updateSrc(N3 N, int Nzpit, P1F3 E, int tstep) { int idx, ijk; idx = blockIdx.x*blockDim.x + threadIdx.x; ijk = idx*(N.y)*(Nzpit) + (N.y/2)*(Nzpit) + 
(N.z/2); //printf("idx=%d, ijk=%d\n", idx, ijk); //Ex[ijk] += __sinf(0.1*tstep); if ( idx < N.x ) { E.x[ijk] += sin(0.1*tstep); } } __global__ void updateH(N3 N, int Nzpit, int TPB, P1F3 E, P1F3 H) { int tk, idx; tk = threadIdx.x; idx = blockIdx.x*TPB + tk; int i,j,k; int Nyz = N.y*Nzpit; i = idx/Nyz; j = ( idx - i*Nyz )/Nzpit; k = idx - i*Nyz - j*Nzpit; extern __shared__ float es[]; float* ex = (float*) es; float* ey = (float*) &ex[TPB+1]; float* ez = (float*) &ey[TPB+1]; if ( i<N.x && k<N.z) { ex[tk+1] = E.x[idx]; ey[tk+1] = E.y[idx]; ez[tk] = E.z[idx]; if ( tk==0 && k>0 ) { ex[0] = E.x[idx-1]; ey[0] = E.y[idx-1]; } } __syncthreads(); if ( i<N.x && k<N.z) { if ( j>0 && k>0 ) H.x[idx] -= 0.5*( ez[tk] - E.z[idx-Nzpit] - ey[tk+1] + ey[tk] ); if ( i>0 && k>0 ) H.y[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + E.z[idx-Nyz] ); if ( i>0 && j>0 ) H.z[idx] -= 0.5*( ey[tk+1] - E.y[idx-Nyz] - ex[tk+1] + E.x[idx-Nzpit] ); } } int main() { int tstep; char time_str[32]; time_t t0; // Set the parameters N3 N; N.x = 100; N.y = 100; N.z = 500; //N.y = 16; //N.z = 20; int TMAX = 10000; printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX); // Allocate host memory P3F3 CE; CE.x = makeArray(N); CE.y = makeArray(N); CE.z = makeArray(N); float ***Ex; Ex = makeArray(N); // Geometry set_geometry(N, CE); // Allocate device memory P1F3 devE; P1F3 devH; P1F3 devCE; int z_size = N.z*sizeof(float); size_t pitch; cudaMallocPitch ( (void**) &devE.x, &pitch, z_size, N.x*N.y ); cudaMallocPitch ( (void**) &devE.y, &pitch, z_size, N.x*N.y ); cudaMallocPitch ( (void**) &devE.z, &pitch, z_size, N.x*N.y ); cudaMallocPitch ( (void**) &devH.x, &pitch, z_size, N.x*N.y ); cudaMallocPitch ( (void**) &devH.y, &pitch, z_size, N.x*N.y ); cudaMallocPitch ( (void**) &devH.z, &pitch, z_size, N.x*N.y ); cudaMallocPitch ( (void**) &devCE.x, &pitch, z_size, N.x*N.y ); cudaMallocPitch ( (void**) &devCE.y, &pitch, z_size, N.x*N.y ); cudaMallocPitch ( (void**) &devCE.z, &pitch, z_size, N.x*N.y ); // Copy arrays from host to device cudaMemcpy2D ( devCE.x, pitch, CE.x[0][0], z_size, z_size, N.x*N.y, cudaMemcpyHostToDevice ); cudaMemcpy2D ( devCE.y, pitch, CE.y[0][0], z_size, z_size, N.x*N.y, cudaMemcpyHostToDevice ); cudaMemcpy2D ( devCE.z, pitch, CE.z[0][0], z_size, z_size, N.x*N.y, cudaMemcpyHostToDevice ); int Nz_pitch = pitch/4; printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch); // Set the GPU parameters int Ntot = N.x*N.y*Nz_pitch; int TPB = 512; // Number of threads per block int BPG = Ntot%TPB == 0 ? Ntot/TPB : Ntot/TPB + 1; // Number of thread blocks per grid dim3 Dg = dim3(BPG); dim3 Db = dim3(TPB); size_t Ns = sizeof(float)*( (TPB+1)+(TPB+1)+(TPB) ); printf("Threads per block: %d\n", TPB); if ( TPB > 512 ) { printf("Error: An excessive number of threads per block.\n"); exit(0); } printf("Blocks per grid: %d\n", BPG); if ( BPG > 65535 ) { printf("Error: An excessive number of blocks per grid.\n"); exit(0); } printf("Number of bytes in shared memory: %d\n", Ns); int TPBsrc = N.x; int BPGsrc = 1; dim3 Dgsrc(BPGsrc); dim3 Dbsrc(TPBsrc); int TPBinit = Nz_pitch; int BPGinit = Ntot%TPBinit == 0 ? 
Ntot/TPBinit : Ntot/TPBinit + 1; dim3 Dginit(BPGinit); dim3 Dbinit(TPBinit); // Initialize the device arrays initArrays <<<Dginit,Dbinit>>> ( N, Nz_pitch, devE, devH ); // Main time loop t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { //for ( tstep=1; tstep<=300; tstep++) { // Update on the GPU updateE <<<Dg,Db,Ns>>> ( N, Nz_pitch, TPB, devE, devH, devCE ); updateSrc <<<Dgsrc,Dbsrc>>> ( N, Nz_pitch, devE, tstep ); updateH <<<Dg,Db,Ns>>> ( N, Nz_pitch, TPB, devE, devH ); //if ( tstep/100*100 == tstep ) { // Copy arrays from device to host //cudaMemcpy2D( Ex[0][0], z_size, devE.x, pitch, z_size, N.x*N.y, cudaMemcpyDeviceToHost ); //print_array(N, Ex); //dumpToH5(N.x, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep); //exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); //updateTimer(t0, tstep, time_str); //printf("tstep=%d\t%s\n", tstep, time_str); //} } updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); }
f5547383e23f936a62ee46beb28e98083cabe3f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // #include <iostream> #include <stdio.h> // #include <vector> #include <cmath> #include <cassert> #include <cutil.h> #include <omp.h> #include "cuda_pointer.h" #define NTHREAD 64 // 64, 96, 128 or 192 #define NJBLOCK 16 // 8800GTS/512 has 16 #define NIBLOCK 16 // 16 or 32 #define NIMAX (NTHREAD * NIBLOCK) // 1024 #define NBMAX 64 // NNB per block template <class T> struct myvector{ int num; T *val; myvector(){ num = 0; val = NULL; } ~myvector(){ delete [] val; } void clear(){ num = 0; } void reserve(size_t count){ val = new T[count]; } void free(){ delete [] val; } void push_back(const T &t){ val[num++] = t; } size_t size(){ return num; } T &operator[](int i){ return val[i]; } }; #define PROFILE #ifdef PROFILE #include <sys/time.h> static double get_wtime(){ struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + 1.e-6 * tv.tv_usec; } #else static double get_wtime(){ return 0.0; } #endif static double time_send, time_grav; static long long numInter; struct Jparticle{ float3 pos; float mass; float3 vel; float pad; Jparticle() {} Jparticle(double mj, double xj[3], double vj[3]){ pos.x = xj[0]; pos.y = xj[1]; pos.z = xj[2]; mass = mj; vel.x = vj[0]; vel.y = vj[1]; vel.z = vj[2]; } }; struct Iparticle{ float3 pos; float h2; float3 vel; float pad; Iparticle() {} Iparticle(double h2i, double xi[3], double vi[3]){ pos.x = xi[0]; pos.y = xi[1]; pos.z = xi[2]; h2 = h2i; vel.x = vi[0]; vel.y = vi[1]; vel.z = vi[2]; } }; struct Force{ float3 acc; float pot; float3 jrk; int nnb; // 8 words unsigned short neib[NBMAX]; // 24 words __device__ Force(){ acc.x = acc.y = acc.z = 0.f; jrk.x = jrk.y = jrk.z = 0.f; pot = 0.f; nnb = 0; } }; __device__ float rsqrtfNR(float x){ float y = rsqrtf(x); return (-0.5f * y) * (x*y*y - 3.0f); } #if 0 struct force1{ float dx, dy, dz; float dvx, dvy, dvz; float r2; float rv; // __device__ force1(){} __device__ void calc( const Iparticle &ip, const Jparticle &jp){ dx = jp.pos.x - ip.pos.x; dy = jp.pos.y - ip.pos.y; dz = jp.pos.z - ip.pos.z; dvx = jp.vel.x - ip.vel.x; dvy = jp.vel.y - ip.vel.y; dvz = jp.vel.z - ip.vel.z; r2 = dx*dx + dy*dy + dz*dz; rv = dx*dvx + dy*dvy + dz*dvz; } }; struct force2{ float rinv1; // __device__ force2(){} __device__ void calc( const int j, const Iparticle &ip, const force1 &f1, Force &fo){ rinv1 = rsqrtf(f1.r2); if(f1.r2 < ip.h2){ fo.neib[fo.nnb++ % NBMAX] = j; rinv1 = 0.f; } } }; struct force3{ float rinv1, rinv2, rinv3; float rv; // __device__ force3(){} __device__ void calc( const Jparticle &jp, const force1 &f1, const force2 &f2, Force &fo){ rinv1 = f2.rinv1; rinv2 = rinv1 * rinv1; rinv1 *= jp.mass; rinv3 = rinv1 * rinv2; rv = f1.rv * -3.f * rinv2; fo.pot += rinv1; fo.acc.x += rinv3 * f1.dx; fo.acc.y += rinv3 * f1.dy; fo.acc.z += rinv3 * f1.dz; fo.jrk.x += rinv3 * (f1.dvx + rv * f1.dx); fo.jrk.y += rinv3 * (f1.dvy + rv * f1.dy); fo.jrk.z += rinv3 * (f1.dvz + rv * f1.dz); } }; #endif __device__ void h4_kernel( const int j, const Iparticle &ip, const Jparticle &jp, Force &fo){ float dx = jp.pos.x - ip.pos.x; float dy = jp.pos.y - ip.pos.y; float dz = jp.pos.z - ip.pos.z; float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float r2 = dx*dx + dy*dy + dz*dz; float rv = dx*dvx + dy*dvy + dz*dvz; float rinv1 = rsqrtf(r2); if(r2 < ip.h2){ // fo.neib[fo.nnb++ % NBMAX] = j; fo.neib[fo.nnb & (NBMAX-1)] = (unsigned)j; fo.nnb++; rinv1 = 0.f; } float rinv2 = rinv1 * rinv1; float mrinv1 = jp.mass * rinv1; float 
mrinv3 = mrinv1 * rinv2; rv *= -3.f * rinv2; #ifdef POTENTIAL fo.pot += mrinv1; #endif fo.acc.x += mrinv3 * dx; fo.acc.y += mrinv3 * dy; fo.acc.z += mrinv3 * dz; // fo.acc.z += 1.0; fo.jrk.x += mrinv3 * (dvx + rv * dx); fo.jrk.y += mrinv3 * (dvy + rv * dy); fo.jrk.z += mrinv3 * (dvz + rv * dz); } __global__ void h4_gravity( int nbody, Iparticle ipbuf[], Jparticle jpbuf[], Force fobuf[][NJBLOCK]){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + NTHREAD * ibid; int jstart = (nbody * (jbid )) / NJBLOCK; int jend = (nbody * (jbid+1)) / NJBLOCK; Iparticle ip = ipbuf[iaddr]; Force fo; for(int j=jstart; j<jend; j+=NTHREAD){ __shared__ Jparticle jpshare[NTHREAD]; __syncthreads(); #if 0 jpshare[tid] = jpbuf[j+tid]; #else float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[ tid] = src[ tid]; dst[NTHREAD+tid] = src[NTHREAD+tid]; #endif __syncthreads(); if(jend-j < NTHREAD){ for(int jj=0; jj<jend-j; jj++){ Jparticle jp = jpshare[jj]; h4_kernel(j+jj, ip, jp, fo); } }else{ #pragma unroll for(int jj=0; jj<NTHREAD; jj++){ Jparticle jp = jpshare[jj]; h4_kernel(j+jj, ip, jp, fo); } } } fobuf[iaddr][jbid] = fo; } #if 0 static Jparticle *jp_host, *jp_dev; static Iparticle *ip_host, *ip_dev; static Force (*fo_host)[NJBLOCK], (*fo_dev)[NJBLOCK]; #else static cudaPointer <Jparticle> jpbuf; static cudaPointer <Iparticle> ipbuf; static cudaPointer <Force[NJBLOCK]> fobuf; #endif #define MAX_CPU 8 static myvector<int> nblist[MAX_CPU]; static int nbody, nbodymax; // static int *nblist; void GPUNB_open(int nbmax){ time_send = time_grav = 0.0; numInter = 0; // CUT_DEVICE_INIT(); // size_t jpsize = nbmax * sizeof(Jparticle); // size_t ipsize = NIMAX * sizeof(Iparticle); // size_t fosize = NIBLOCK * NJBLOCK * NTHREAD * sizeof(Force); // hipHostMalloc((void **)&jp_host, jpsize); // jpsize += NTHREAD * sizeof(Jparticle); // hipMalloc ((void **)&jp_dev , jpsize); // hipHostMalloc((void **)&ip_host, ipsize); // hipMalloc ((void **)&ip_dev , ipsize); // hipHostMalloc((void **)&fo_host, fosize); // hipMalloc ((void **)&fo_dev , fosize); jpbuf.allocate(nbmax + NTHREAD); ipbuf.allocate(NIMAX); fobuf.allocate(NIMAX); nbodymax = nbmax; #pragma omp parallel { int tid = omp_get_thread_num(); nblist[tid].reserve(nbmax); } } void GPUNB_close(){ // hipHostFree(jp_host); // hipFree (jp_dev); // hipHostFree(ip_host); // hipFree (ip_dev); // hipHostFree(fo_host); // hipFree (fo_dev); jpbuf.free(); ipbuf.free(); fobuf.free(); nbodymax = 0; #ifdef PROFILE #if 0 std::cerr << "***********************" << std::endl; std::cerr << "time send : " << time_send << " sec " << std::endl; std::cerr << "time grav : " << time_grav << " sec " << std::endl; std::cerr << 60.e-9 * numInter / time_grav << " Gflops (gravity part only)" << std::endl; std::cerr << "***********************" << std::endl; #else fprintf(stderr, "***********************\n"); fprintf(stderr, "time send : %f sec\n", time_send); fprintf(stderr, "time grav : %f sec\n", time_grav); fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav); fprintf(stderr, "***********************\n"); #endif #endif } void GPUNB_send( int nj, double mj[], double xj[][3], double vj[][3]){ time_send -= get_wtime(); nbody = nj; assert(nbody <= nbodymax); for(int j=0; j<nj; j++){ // jp_host[j] = Jparticle(mj[j], xj[j], vj[j]); jpbuf[j] = Jparticle(mj[j], xj[j], vj[j]); } // size_t jpsize = nj * sizeof(Jparticle); // hipMemcpy(jp_dev, jp_host, jpsize, hipMemcpyHostToDevice); jpbuf.htod(nj); time_send += get_wtime(); } void 
GPUNB_regf( int ni, double h2[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int lmax, int nbmax, int *listbase){ time_grav -= get_wtime(); numInter += ni * nbody; assert(0 < ni && ni <= NIMAX); for(int i=0; i<ni; i++){ // ip_host[i] = Iparticle(h2[i], xi[i], vi[i]); ipbuf[i] = Iparticle(h2[i], xi[i], vi[i]); } // set i-particles // size_t ipsize = ni * sizeof(Iparticle); // hipMemcpy(ip_dev, ip_host, ipsize, hipMemcpyHostToDevice); ipbuf.htod(ni); // gravity kernel int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); #if 0 int sharedMemSize = NTHREAD * sizeof(Jparticle); hipLaunchKernelGGL(( h4_gravity) , dim3(grid), dim3(threads), sharedMemSize , 0, nbody, ip_dev, jp_dev, fo_dev); #else // h4_gravity <<< grid, threads >>> // (nbody, ip_dev, jp_dev, fo_dev); hipLaunchKernelGGL(( h4_gravity) , dim3(grid), dim3(threads) , 0, 0, nbody, ipbuf, jpbuf, fobuf); #endif // recieve force // size_t fosize = ni * NJBLOCK * sizeof(Force); // hipMemcpy(fo_host, fo_dev, fosize, hipMemcpyDeviceToHost); fobuf.dtoh(ni); // reduction phase #pragma omp parallel for for(int i=0; i<ni; i++){ int tid = omp_get_thread_num(); double ax=0, ay=0, az=0; double jx=0, jy=0, jz=0; #ifdef POTENTIAL double poti=0; #endif for(int jb=0; jb<NJBLOCK; jb++){ // Force &fo = fo_host[i][jb]; Force &fo = fobuf[i][jb]; ax += fo.acc.x; ay += fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; #ifdef POTENTIAL poti += fo.pot; #endif } acc[i][0] = ax; acc[i][1] = ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; // fprintf(stderr, "%f %f %f %f %f %f\n", ax, ay, az, jx, jy, jz); // exit(0); #ifdef POTENTIAL pot[i] = poti; #endif bool overflow = false; nblist[tid].clear(); for(int jb=0; jb<NJBLOCK; jb++){ // Force &fo = fo_host[i][jb]; Force &fo = fobuf[i][jb]; int jstart = (nbody * jb) / NJBLOCK; if(fo.nnb <= NBMAX){ for(int k=0; k<fo.nnb; k++){ int nb = fo.neib[k]; while(nb < jstart) nb += (1<<16); nblist[tid].push_back(nb); // nblist.push_back(fo.neib[k]); } }else{ overflow = true; } } int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = nblist[tid].size(); if(nnb > nbmax) overflow = true; // assert(!overflow); if(overflow){ *nnbp = -1; }else{ *nnbp = nnb; for(int k=0; k<nnb; k++){ nblistp[k] = nblist[tid][k]; } } } #if 0 if(ni > 0){ FILE *fp = fopen("Force.gpu", "w"); assert(fp); for(int i=0; i<ni; i++){ int nnb = listbase[i*lmax]; fprintf(fp, "%d %9.2e %9.2e %9.2e %9.2e %9.2e %9.2e %d\n", i, acc[i][0], acc[i][1], acc[i][2], jrk[i][0], jrk[i][1], jrk[i][2], nnb); } fprintf(fp, "\n"); fclose(fp); exit(1); } #endif time_grav += get_wtime(); } extern "C" { void gpunb_open_(int *nbmax){ GPUNB_open(*nbmax); } void gpunb_close_(){ GPUNB_close(); } void gpunb_send_( int *nj, double mj[], double xj[][3], double vj[][3]){ GPUNB_send(*nj, mj, xj, vj); } void gpunb_regf_( int *ni, double h2[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list){ // list[][lmax] GPUNB_regf(*ni, h2, xi, vi, acc, jrk, pot, *lmax, *nbmax, list); } }
f5547383e23f936a62ee46beb28e98083cabe3f5.cu
// #include <iostream> #include <stdio.h> // #include <vector> #include <cmath> #include <cassert> #include <cutil.h> #include <omp.h> #include "cuda_pointer.h" #define NTHREAD 64 // 64, 96, 128 or 192 #define NJBLOCK 16 // 8800GTS/512 has 16 #define NIBLOCK 16 // 16 or 32 #define NIMAX (NTHREAD * NIBLOCK) // 1024 #define NBMAX 64 // NNB per block template <class T> struct myvector{ int num; T *val; myvector(){ num = 0; val = NULL; } ~myvector(){ delete [] val; } void clear(){ num = 0; } void reserve(size_t count){ val = new T[count]; } void free(){ delete [] val; } void push_back(const T &t){ val[num++] = t; } size_t size(){ return num; } T &operator[](int i){ return val[i]; } }; #define PROFILE #ifdef PROFILE #include <sys/time.h> static double get_wtime(){ struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + 1.e-6 * tv.tv_usec; } #else static double get_wtime(){ return 0.0; } #endif static double time_send, time_grav; static long long numInter; struct Jparticle{ float3 pos; float mass; float3 vel; float pad; Jparticle() {} Jparticle(double mj, double xj[3], double vj[3]){ pos.x = xj[0]; pos.y = xj[1]; pos.z = xj[2]; mass = mj; vel.x = vj[0]; vel.y = vj[1]; vel.z = vj[2]; } }; struct Iparticle{ float3 pos; float h2; float3 vel; float pad; Iparticle() {} Iparticle(double h2i, double xi[3], double vi[3]){ pos.x = xi[0]; pos.y = xi[1]; pos.z = xi[2]; h2 = h2i; vel.x = vi[0]; vel.y = vi[1]; vel.z = vi[2]; } }; struct Force{ float3 acc; float pot; float3 jrk; int nnb; // 8 words unsigned short neib[NBMAX]; // 24 words __device__ Force(){ acc.x = acc.y = acc.z = 0.f; jrk.x = jrk.y = jrk.z = 0.f; pot = 0.f; nnb = 0; } }; __device__ float rsqrtfNR(float x){ float y = rsqrtf(x); return (-0.5f * y) * (x*y*y - 3.0f); } #if 0 struct force1{ float dx, dy, dz; float dvx, dvy, dvz; float r2; float rv; // __device__ force1(){} __device__ void calc( const Iparticle &ip, const Jparticle &jp){ dx = jp.pos.x - ip.pos.x; dy = jp.pos.y - ip.pos.y; dz = jp.pos.z - ip.pos.z; dvx = jp.vel.x - ip.vel.x; dvy = jp.vel.y - ip.vel.y; dvz = jp.vel.z - ip.vel.z; r2 = dx*dx + dy*dy + dz*dz; rv = dx*dvx + dy*dvy + dz*dvz; } }; struct force2{ float rinv1; // __device__ force2(){} __device__ void calc( const int j, const Iparticle &ip, const force1 &f1, Force &fo){ rinv1 = rsqrtf(f1.r2); if(f1.r2 < ip.h2){ fo.neib[fo.nnb++ % NBMAX] = j; rinv1 = 0.f; } } }; struct force3{ float rinv1, rinv2, rinv3; float rv; // __device__ force3(){} __device__ void calc( const Jparticle &jp, const force1 &f1, const force2 &f2, Force &fo){ rinv1 = f2.rinv1; rinv2 = rinv1 * rinv1; rinv1 *= jp.mass; rinv3 = rinv1 * rinv2; rv = f1.rv * -3.f * rinv2; fo.pot += rinv1; fo.acc.x += rinv3 * f1.dx; fo.acc.y += rinv3 * f1.dy; fo.acc.z += rinv3 * f1.dz; fo.jrk.x += rinv3 * (f1.dvx + rv * f1.dx); fo.jrk.y += rinv3 * (f1.dvy + rv * f1.dy); fo.jrk.z += rinv3 * (f1.dvz + rv * f1.dz); } }; #endif __device__ void h4_kernel( const int j, const Iparticle &ip, const Jparticle &jp, Force &fo){ float dx = jp.pos.x - ip.pos.x; float dy = jp.pos.y - ip.pos.y; float dz = jp.pos.z - ip.pos.z; float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float r2 = dx*dx + dy*dy + dz*dz; float rv = dx*dvx + dy*dvy + dz*dvz; float rinv1 = rsqrtf(r2); if(r2 < ip.h2){ // fo.neib[fo.nnb++ % NBMAX] = j; fo.neib[fo.nnb & (NBMAX-1)] = (unsigned)j; fo.nnb++; rinv1 = 0.f; } float rinv2 = rinv1 * rinv1; float mrinv1 = jp.mass * rinv1; float mrinv3 = mrinv1 * rinv2; rv *= -3.f * rinv2; #ifdef POTENTIAL fo.pot += mrinv1; #endif 
fo.acc.x += mrinv3 * dx; fo.acc.y += mrinv3 * dy; fo.acc.z += mrinv3 * dz; // fo.acc.z += 1.0; fo.jrk.x += mrinv3 * (dvx + rv * dx); fo.jrk.y += mrinv3 * (dvy + rv * dy); fo.jrk.z += mrinv3 * (dvz + rv * dz); } __global__ void h4_gravity( int nbody, Iparticle ipbuf[], Jparticle jpbuf[], Force fobuf[][NJBLOCK]){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + NTHREAD * ibid; int jstart = (nbody * (jbid )) / NJBLOCK; int jend = (nbody * (jbid+1)) / NJBLOCK; Iparticle ip = ipbuf[iaddr]; Force fo; for(int j=jstart; j<jend; j+=NTHREAD){ __shared__ Jparticle jpshare[NTHREAD]; __syncthreads(); #if 0 jpshare[tid] = jpbuf[j+tid]; #else float4 *src = (float4 *)&jpbuf[j]; float4 *dst = (float4 *)jpshare; dst[ tid] = src[ tid]; dst[NTHREAD+tid] = src[NTHREAD+tid]; #endif __syncthreads(); if(jend-j < NTHREAD){ for(int jj=0; jj<jend-j; jj++){ Jparticle jp = jpshare[jj]; h4_kernel(j+jj, ip, jp, fo); } }else{ #pragma unroll for(int jj=0; jj<NTHREAD; jj++){ Jparticle jp = jpshare[jj]; h4_kernel(j+jj, ip, jp, fo); } } } fobuf[iaddr][jbid] = fo; } #if 0 static Jparticle *jp_host, *jp_dev; static Iparticle *ip_host, *ip_dev; static Force (*fo_host)[NJBLOCK], (*fo_dev)[NJBLOCK]; #else static cudaPointer <Jparticle> jpbuf; static cudaPointer <Iparticle> ipbuf; static cudaPointer <Force[NJBLOCK]> fobuf; #endif #define MAX_CPU 8 static myvector<int> nblist[MAX_CPU]; static int nbody, nbodymax; // static int *nblist; void GPUNB_open(int nbmax){ time_send = time_grav = 0.0; numInter = 0; // CUT_DEVICE_INIT(); // size_t jpsize = nbmax * sizeof(Jparticle); // size_t ipsize = NIMAX * sizeof(Iparticle); // size_t fosize = NIBLOCK * NJBLOCK * NTHREAD * sizeof(Force); // cudaMallocHost((void **)&jp_host, jpsize); // jpsize += NTHREAD * sizeof(Jparticle); // cudaMalloc ((void **)&jp_dev , jpsize); // cudaMallocHost((void **)&ip_host, ipsize); // cudaMalloc ((void **)&ip_dev , ipsize); // cudaMallocHost((void **)&fo_host, fosize); // cudaMalloc ((void **)&fo_dev , fosize); jpbuf.allocate(nbmax + NTHREAD); ipbuf.allocate(NIMAX); fobuf.allocate(NIMAX); nbodymax = nbmax; #pragma omp parallel { int tid = omp_get_thread_num(); nblist[tid].reserve(nbmax); } } void GPUNB_close(){ // cudaFreeHost(jp_host); // cudaFree (jp_dev); // cudaFreeHost(ip_host); // cudaFree (ip_dev); // cudaFreeHost(fo_host); // cudaFree (fo_dev); jpbuf.free(); ipbuf.free(); fobuf.free(); nbodymax = 0; #ifdef PROFILE #if 0 std::cerr << "***********************" << std::endl; std::cerr << "time send : " << time_send << " sec " << std::endl; std::cerr << "time grav : " << time_grav << " sec " << std::endl; std::cerr << 60.e-9 * numInter / time_grav << " Gflops (gravity part only)" << std::endl; std::cerr << "***********************" << std::endl; #else fprintf(stderr, "***********************\n"); fprintf(stderr, "time send : %f sec\n", time_send); fprintf(stderr, "time grav : %f sec\n", time_grav); fprintf(stderr, "%f Gflops (gravity part only)\n", 60.e-9 * numInter / time_grav); fprintf(stderr, "***********************\n"); #endif #endif } void GPUNB_send( int nj, double mj[], double xj[][3], double vj[][3]){ time_send -= get_wtime(); nbody = nj; assert(nbody <= nbodymax); for(int j=0; j<nj; j++){ // jp_host[j] = Jparticle(mj[j], xj[j], vj[j]); jpbuf[j] = Jparticle(mj[j], xj[j], vj[j]); } // size_t jpsize = nj * sizeof(Jparticle); // cudaMemcpy(jp_dev, jp_host, jpsize, cudaMemcpyHostToDevice); jpbuf.htod(nj); time_send += get_wtime(); } void GPUNB_regf( int ni, double h2[], double xi[][3], double vi[][3], double 
acc[][3], double jrk[][3], double pot[], int lmax, int nbmax, int *listbase){ time_grav -= get_wtime(); numInter += ni * nbody; assert(0 < ni && ni <= NIMAX); for(int i=0; i<ni; i++){ // ip_host[i] = Iparticle(h2[i], xi[i], vi[i]); ipbuf[i] = Iparticle(h2[i], xi[i], vi[i]); } // set i-particles // size_t ipsize = ni * sizeof(Iparticle); // cudaMemcpy(ip_dev, ip_host, ipsize, cudaMemcpyHostToDevice); ipbuf.htod(ni); // gravity kernel int niblock = 1 + (ni-1) / NTHREAD; dim3 grid(niblock, NJBLOCK, 1); dim3 threads(NTHREAD, 1, 1); #if 0 int sharedMemSize = NTHREAD * sizeof(Jparticle); h4_gravity <<< grid, threads, sharedMemSize >>> (nbody, ip_dev, jp_dev, fo_dev); #else // h4_gravity <<< grid, threads >>> // (nbody, ip_dev, jp_dev, fo_dev); h4_gravity <<< grid, threads >>> (nbody, ipbuf, jpbuf, fobuf); #endif // recieve force // size_t fosize = ni * NJBLOCK * sizeof(Force); // cudaMemcpy(fo_host, fo_dev, fosize, cudaMemcpyDeviceToHost); fobuf.dtoh(ni); // reduction phase #pragma omp parallel for for(int i=0; i<ni; i++){ int tid = omp_get_thread_num(); double ax=0, ay=0, az=0; double jx=0, jy=0, jz=0; #ifdef POTENTIAL double poti=0; #endif for(int jb=0; jb<NJBLOCK; jb++){ // Force &fo = fo_host[i][jb]; Force &fo = fobuf[i][jb]; ax += fo.acc.x; ay += fo.acc.y; az += fo.acc.z; jx += fo.jrk.x; jy += fo.jrk.y; jz += fo.jrk.z; #ifdef POTENTIAL poti += fo.pot; #endif } acc[i][0] = ax; acc[i][1] = ay; acc[i][2] = az; jrk[i][0] = jx; jrk[i][1] = jy; jrk[i][2] = jz; // fprintf(stderr, "%f %f %f %f %f %f\n", ax, ay, az, jx, jy, jz); // exit(0); #ifdef POTENTIAL pot[i] = poti; #endif bool overflow = false; nblist[tid].clear(); for(int jb=0; jb<NJBLOCK; jb++){ // Force &fo = fo_host[i][jb]; Force &fo = fobuf[i][jb]; int jstart = (nbody * jb) / NJBLOCK; if(fo.nnb <= NBMAX){ for(int k=0; k<fo.nnb; k++){ int nb = fo.neib[k]; while(nb < jstart) nb += (1<<16); nblist[tid].push_back(nb); // nblist.push_back(fo.neib[k]); } }else{ overflow = true; } } int *nnbp = listbase + lmax * i; int *nblistp = nnbp + 1; int nnb = nblist[tid].size(); if(nnb > nbmax) overflow = true; // assert(!overflow); if(overflow){ *nnbp = -1; }else{ *nnbp = nnb; for(int k=0; k<nnb; k++){ nblistp[k] = nblist[tid][k]; } } } #if 0 if(ni > 0){ FILE *fp = fopen("Force.gpu", "w"); assert(fp); for(int i=0; i<ni; i++){ int nnb = listbase[i*lmax]; fprintf(fp, "%d %9.2e %9.2e %9.2e %9.2e %9.2e %9.2e %d\n", i, acc[i][0], acc[i][1], acc[i][2], jrk[i][0], jrk[i][1], jrk[i][2], nnb); } fprintf(fp, "\n"); fclose(fp); exit(1); } #endif time_grav += get_wtime(); } extern "C" { void gpunb_open_(int *nbmax){ GPUNB_open(*nbmax); } void gpunb_close_(){ GPUNB_close(); } void gpunb_send_( int *nj, double mj[], double xj[][3], double vj[][3]){ GPUNB_send(*nj, mj, xj, vj); } void gpunb_regf_( int *ni, double h2[], double xi[][3], double vi[][3], double acc[][3], double jrk[][3], double pot[], int *lmax, int *nbmax, int *list){ // list[][lmax] GPUNB_regf(*ni, h2, xi, vi, acc, jrk, pot, *lmax, *nbmax, list); } }
817cbceaa256817ed60c5e67353f879b17c865a4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/detection/yolo_box_op.h"
#include "paddle/fluid/operators/math/math_function.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

template <typename T>
__global__ void KeYoloBoxFw(const T* input, const int* imgsize, T* boxes,
                            T* scores, const float conf_thresh,
                            const int* anchors, const int n, const int h,
                            const int w, const int an_num, const int class_num,
                            const int box_num, int input_size) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  T box[4];
  for (; tid < n * box_num; tid += stride) {
    int grid_num = h * w;
    int i = tid / box_num;
    int j = (tid % box_num) / grid_num;
    int k = (tid % grid_num) / w;
    int l = tid % w;

    int an_stride = (5 + class_num) * grid_num;
    int img_height = imgsize[2 * i];
    int img_width = imgsize[2 * i + 1];

    int obj_idx =
        GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 4);
    T conf = sigmoid<T>(input[obj_idx]);
    if (conf < conf_thresh) {
      continue;
    }

    int box_idx =
        GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 0);
    GetYoloBox<T>(box, input, anchors, l, k, j, h, input_size, box_idx,
                  grid_num, img_height, img_width);
    box_idx = (i * box_num + j * grid_num + k * w + l) * 4;
    CalcDetectionBox<T>(boxes, box, box_idx, img_height, img_width);

    int label_idx =
        GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 5);
    int score_idx = (i * box_num + j * grid_num + k * w + l) * class_num;
    CalcLabelScore<T>(scores, input, label_idx, score_idx, class_num, conf,
                      grid_num);
  }
}

template <typename T>
class YoloBoxOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* img_size = ctx.Input<Tensor>("ImgSize");
    auto* boxes = ctx.Output<Tensor>("Boxes");
    auto* scores = ctx.Output<Tensor>("Scores");

    auto anchors = ctx.Attr<std::vector<int>>("anchors");
    int class_num = ctx.Attr<int>("class_num");
    float conf_thresh = ctx.Attr<float>("conf_thresh");
    int downsample_ratio = ctx.Attr<int>("downsample_ratio");

    const int n = input->dims()[0];
    const int h = input->dims()[2];
    const int w = input->dims()[3];
    const int box_num = boxes->dims()[1];
    const int an_num = anchors.size() / 2;
    int input_size = downsample_ratio * h;

    auto& dev_ctx = ctx.cuda_device_context();
    int bytes = sizeof(int) * anchors.size();
    auto anchors_ptr = memory::Alloc(dev_ctx, sizeof(int) * anchors.size());
    int* anchors_data = reinterpret_cast<int*>(anchors_ptr->ptr());
    const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
    const auto cplace = platform::CPUPlace();
    memory::Copy(gplace, anchors_data, cplace, anchors.data(), bytes,
                 dev_ctx.stream());

    const T* input_data = input->data<T>();
    const int* imgsize_data = img_size->data<int>();
    T* boxes_data = boxes->mutable_data<T>({n, box_num, 4}, ctx.GetPlace());
    T* scores_data =
        scores->mutable_data<T>({n, box_num, class_num}, ctx.GetPlace());
    math::SetConstant<platform::CUDADeviceContext, T> set_zero;
    set_zero(dev_ctx, boxes, static_cast<T>(0));
    set_zero(dev_ctx, scores, static_cast<T>(0));

    int grid_dim = (n * box_num + 512 - 1) / 512;
    grid_dim = grid_dim > 8 ? 8 : grid_dim;

    hipLaunchKernelGGL(( KeYoloBoxFw<T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
        input_data, imgsize_data, boxes_data, scores_data, conf_thresh,
        anchors_data, n, h, w, an_num, class_num, box_num, input_size);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(yolo_box, ops::YoloBoxOpCUDAKernel<float>,
                        ops::YoloBoxOpCUDAKernel<double>);
817cbceaa256817ed60c5e67353f879b17c865a4.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/detection/yolo_box_op.h"
#include "paddle/fluid/operators/math/math_function.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

template <typename T>
__global__ void KeYoloBoxFw(const T* input, const int* imgsize, T* boxes,
                            T* scores, const float conf_thresh,
                            const int* anchors, const int n, const int h,
                            const int w, const int an_num, const int class_num,
                            const int box_num, int input_size) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  T box[4];
  for (; tid < n * box_num; tid += stride) {
    int grid_num = h * w;
    int i = tid / box_num;
    int j = (tid % box_num) / grid_num;
    int k = (tid % grid_num) / w;
    int l = tid % w;

    int an_stride = (5 + class_num) * grid_num;
    int img_height = imgsize[2 * i];
    int img_width = imgsize[2 * i + 1];

    int obj_idx =
        GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 4);
    T conf = sigmoid<T>(input[obj_idx]);
    if (conf < conf_thresh) {
      continue;
    }

    int box_idx =
        GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 0);
    GetYoloBox<T>(box, input, anchors, l, k, j, h, input_size, box_idx,
                  grid_num, img_height, img_width);
    box_idx = (i * box_num + j * grid_num + k * w + l) * 4;
    CalcDetectionBox<T>(boxes, box, box_idx, img_height, img_width);

    int label_idx =
        GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 5);
    int score_idx = (i * box_num + j * grid_num + k * w + l) * class_num;
    CalcLabelScore<T>(scores, input, label_idx, score_idx, class_num, conf,
                      grid_num);
  }
}

template <typename T>
class YoloBoxOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* img_size = ctx.Input<Tensor>("ImgSize");
    auto* boxes = ctx.Output<Tensor>("Boxes");
    auto* scores = ctx.Output<Tensor>("Scores");

    auto anchors = ctx.Attr<std::vector<int>>("anchors");
    int class_num = ctx.Attr<int>("class_num");
    float conf_thresh = ctx.Attr<float>("conf_thresh");
    int downsample_ratio = ctx.Attr<int>("downsample_ratio");

    const int n = input->dims()[0];
    const int h = input->dims()[2];
    const int w = input->dims()[3];
    const int box_num = boxes->dims()[1];
    const int an_num = anchors.size() / 2;
    int input_size = downsample_ratio * h;

    auto& dev_ctx = ctx.cuda_device_context();
    int bytes = sizeof(int) * anchors.size();
    auto anchors_ptr = memory::Alloc(dev_ctx, sizeof(int) * anchors.size());
    int* anchors_data = reinterpret_cast<int*>(anchors_ptr->ptr());
    const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
    const auto cplace = platform::CPUPlace();
    memory::Copy(gplace, anchors_data, cplace, anchors.data(), bytes,
                 dev_ctx.stream());

    const T* input_data = input->data<T>();
    const int* imgsize_data = img_size->data<int>();
    T* boxes_data = boxes->mutable_data<T>({n, box_num, 4}, ctx.GetPlace());
    T* scores_data =
        scores->mutable_data<T>({n, box_num, class_num}, ctx.GetPlace());
    math::SetConstant<platform::CUDADeviceContext, T> set_zero;
    set_zero(dev_ctx, boxes, static_cast<T>(0));
    set_zero(dev_ctx, scores, static_cast<T>(0));

    int grid_dim = (n * box_num + 512 - 1) / 512;
    grid_dim = grid_dim > 8 ? 8 : grid_dim;

    KeYoloBoxFw<T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
        input_data, imgsize_data, boxes_data, scores_data, conf_thresh,
        anchors_data, n, h, w, an_num, class_num, box_num, input_size);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(yolo_box, ops::YoloBoxOpCUDAKernel<float>,
                        ops::YoloBoxOpCUDAKernel<double>);
46383dc9e5df9f54a42b6ab08fc0cbb1f2960748.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "simpleAddCPU.hpp"
#include "simpleAddGPU.h"
#include <stdio.h>

int main(void){
  // int *a;
  // a = new int[N];
  // int *b;
  // b = new int[N];
  // int *c;
  // c = new int[N];

  int LOOP_COUNT = 1000;
  int NUM_PARTICLES = 1000000;
  long N = 300*NUM_PARTICLES; // ~ 300 variable arrays in Graphyt
  int ITERATE_COUNT = 1;

  double* a;
  hipMallocManaged(&a, N*sizeof(double));
  double* b;
  hipMallocManaged(&b, N*sizeof(double));
  double* c;
  hipMallocManaged(&c, N*sizeof(double));

  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;

  hipLaunchKernelGGL(( randomize_cuda), dim3(1), dim3(blockSize), 0, 0, a,b,c,N);
  hipDeviceSynchronize();

  double t1 = omp_get_wtime();
  for(int i=0;i<LOOP_COUNT;i++)
  {
    hipLaunchKernelGGL(( add_cuda), dim3(numBlocks), dim3(blockSize), 0, 0, a,b,c,N,ITERATE_COUNT);
  }
  hipDeviceSynchronize();
  double t2 = omp_get_wtime();

  for(int p=0;p<10;p++){
    printf("%f, %f,%f \n",a[p],b[p],c[p]);
  }
  printf("Particles: %li\nGPU Time: %.2fs\n",N/300,t2-t1);

  double t3 = omp_get_wtime();
  for(int i=0;i<LOOP_COUNT;i++)
  {
    add_CPU(a,b,c,N,ITERATE_COUNT);
  }
  double t4 = omp_get_wtime();

  for(int p=0;p<10;p++){
    printf("%f, %f,%f \n",a[p],b[p],c[p]);
  }
  printf("Particles: %li\nGPU Time: %.2fs\nCPU Time: %.2fs\nApprox. Speed-up: %.1f\n",N/300,t2-t1,t4-t3,(t4-t3)/(t2-t1));

  // delete a;
  // delete b;
  // delete c;

  hipDeviceSynchronize();
  hipFree(a);
  hipFree(b);
  hipFree(c);

  return 0;
}
46383dc9e5df9f54a42b6ab08fc0cbb1f2960748.cu
#include "simpleAddCPU.hpp" #include "simpleAddGPU.h" #include <stdio.h> int main(void){ // int *a; // a = new int[N]; // int *b; // b = new int[N]; // int *c; // c = new int[N]; int LOOP_COUNT = 1000; int NUM_PARTICLES = 1000000; long N = 300*NUM_PARTICLES; // ~ 300 variable arrays in Graphyt int ITERATE_COUNT = 1; double* a; cudaMallocManaged(&a, N*sizeof(double)); double* b; cudaMallocManaged(&b, N*sizeof(double)); double* c; cudaMallocManaged(&c, N*sizeof(double)); int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; randomize_cuda<<<1, blockSize>>>(a,b,c,N); cudaDeviceSynchronize(); double t1 = omp_get_wtime(); for(int i=0;i<LOOP_COUNT;i++) { add_cuda<<<numBlocks, blockSize>>>(a,b,c,N,ITERATE_COUNT); } cudaDeviceSynchronize(); double t2 = omp_get_wtime(); for(int p=0;p<10;p++){ printf("%f, %f,%f \n",a[p],b[p],c[p]); } printf("Particles: %li\nGPU Time: %.2fs\n",N/300,t2-t1); double t3 = omp_get_wtime(); for(int i=0;i<LOOP_COUNT;i++) { add_CPU(a,b,c,N,ITERATE_COUNT); } double t4 = omp_get_wtime(); for(int p=0;p<10;p++){ printf("%f, %f,%f \n",a[p],b[p],c[p]); } printf("Particles: %li\nGPU Time: %.2fs\nCPU Time: %.2fs\nApprox. Speed-up: %.1f\n",N/300,t2-t1,t4-t3,(t4-t3)/(t2-t1)); // delete a; // delete b; // delete c; cudaDeviceSynchronize(); cudaFree(a); cudaFree(b); cudaFree(c); return 0; }
81ae25750a7ca59a845d6eb9827503739f7219d9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void gpu_unmix24(int32_t * u, int32_t * v, uint8_t * out, uint32_t stride, uint32_t * numSamples, int32_t * mixbits, int32_t * mixres, uint16_t * shiftUV, int32_t bytesShifted, int32_t theOutputPacketBytes, uint32_t frameLength)
{
    int block = blockIdx.x % 8;
    int index = blockIdx.x / 8;
    int z = threadIdx.x + block * blockDim.x;

    if (z < numSamples[index])
    {
        int32_t shift = bytesShifted * 8;
        int32_t l, r;
        int32_t k = z * 2;
        uint8_t * op = out + (index * theOutputPacketBytes);

        if (mixres[index] != 0)
        {
            /* matrixed stereo */
            l = (u + index * frameLength)[z] + (v + index * frameLength)[z] - ((mixres[index] * (v + index * frameLength)[z]) >> mixbits[index]);
            r = l - (v + index * frameLength)[z];

            l = (l << shift) | (uint32_t)(shiftUV + index * frameLength * 2)[k + 0];
            r = (r << shift) | (uint32_t)(shiftUV + index * frameLength * 2)[k + 1];

            op += 3 * z;
            op += (stride - 1) * 3 * z;

            op[HBYTE] = (uint8_t)((l >> 16) & 0xffu);
            op[MBYTE] = (uint8_t)((l >> 8) & 0xffu);
            op[LBYTE] = (uint8_t)((l >> 0) & 0xffu);
            op += 3;

            op[HBYTE] = (uint8_t)((r >> 16) & 0xffu);
            op[MBYTE] = (uint8_t)((r >> 8) & 0xffu);
            op[LBYTE] = (uint8_t)((r >> 0) & 0xffu);
        }
        else
        {
            /* Conventional separated stereo. */
            l = (u + index * frameLength)[z];
            r = (v + index * frameLength)[z];

            l = (l << shift) | (uint32_t)(shiftUV + index * frameLength * 2)[k + 0];
            r = (r << shift) | (uint32_t)(shiftUV + index * frameLength * 2)[k + 1];

            op += 3 * z;
            op += (stride - 1) * 3 * z;

            op[HBYTE] = (uint8_t)((l >> 16) & 0xffu);
            op[MBYTE] = (uint8_t)((l >> 8) & 0xffu);
            op[LBYTE] = (uint8_t)((l >> 0) & 0xffu);
            op += 3;

            op[HBYTE] = (uint8_t)((r >> 16) & 0xffu);
            op[MBYTE] = (uint8_t)((r >> 8) & 0xffu);
            op[LBYTE] = (uint8_t)((r >> 0) & 0xffu);
        }
    }
}
81ae25750a7ca59a845d6eb9827503739f7219d9.cu
#include "includes.h" __global__ void gpu_unmix24(int32_t * u, int32_t * v, uint8_t * out, uint32_t stride, uint32_t * numSamples, int32_t * mixbits, int32_t * mixres, uint16_t * shiftUV, int32_t bytesShifted, int32_t theOutputPacketBytes, uint32_t frameLength) { int block = blockIdx.x % 8; int index = blockIdx.x / 8; int z = threadIdx.x + block * blockDim.x; if (z < numSamples[index]) { int32_t shift = bytesShifted * 8; int32_t l, r; int32_t k = z * 2; uint8_t * op = out + (index * theOutputPacketBytes); if (mixres[index] != 0) { /* matrixed stereo */ l = (u + index * frameLength)[z] + (v + index * frameLength)[z] - ((mixres[index] * (v + index * frameLength)[z]) >> mixbits[index]); r = l - (v + index * frameLength)[z]; l = (l << shift) | (uint32_t)(shiftUV + index * frameLength * 2)[k + 0]; r = (r << shift) | (uint32_t)(shiftUV + index * frameLength * 2)[k + 1]; op += 3 * z; op += (stride - 1) * 3 * z; op[HBYTE] = (uint8_t)((l >> 16) & 0xffu); op[MBYTE] = (uint8_t)((l >> 8) & 0xffu); op[LBYTE] = (uint8_t)((l >> 0) & 0xffu); op += 3; op[HBYTE] = (uint8_t)((r >> 16) & 0xffu); op[MBYTE] = (uint8_t)((r >> 8) & 0xffu); op[LBYTE] = (uint8_t)((r >> 0) & 0xffu); } else { /* Conventional separated stereo. */ l = (u + index * frameLength)[z]; r = (v + index * frameLength)[z]; l = (l << shift) | (uint32_t)(shiftUV + index * frameLength * 2)[k + 0]; r = (r << shift) | (uint32_t)(shiftUV + index * frameLength * 2)[k + 1]; op += 3 * z; op += (stride - 1) * 3 * z; op[HBYTE] = (uint8_t)((l >> 16) & 0xffu); op[MBYTE] = (uint8_t)((l >> 8) & 0xffu); op[LBYTE] = (uint8_t)((l >> 0) & 0xffu); op += 3; op[HBYTE] = (uint8_t)((r >> 16) & 0xffu); op[MBYTE] = (uint8_t)((r >> 8) & 0xffu); op[LBYTE] = (uint8_t)((r >> 0) & 0xffu); } } }
b63c7b237c47efde7dc53555b736bc4c061389c5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[24,768] --blockDim=[16,4]

// in host invocation
// assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS);
// assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0);
// assert(imageH % ROWS_BLOCKDIM_Y == 0);

#define KERNEL_RADIUS 8
#define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1)

#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 4
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1

__constant__ float c_Kernel[KERNEL_LENGTH];

__global__ void convolutionRowsKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int pitch
)
{
    __requires(pitch == 3072);
    __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];

    //Offset to the left halo edge
    const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;

    d_Src += baseY * pitch + baseX;
    d_Dst += baseY * pitch + baseX;

    //Load main data
#pragma unroll
    for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
    }

    //Load left halo
#pragma unroll
    for (int i = 0; i < ROWS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
    }

    //Load right halo
#pragma unroll
    for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
    }

    //Compute and store results
    __syncthreads();
#pragma unroll
    for (int i = ROWS_HALO_STEPS;
#define base (baseY * pitch + baseX)
         __invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)%(ROWS_BLOCKDIM_X) == 0)),
         __invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)/(ROWS_BLOCKDIM_X) >= ROWS_HALO_STEPS)),
         __invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)/(ROWS_BLOCKDIM_X) < ROWS_HALO_STEPS + ROWS_RESULT_STEPS)),
         i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        float sum = 0;

#pragma unroll
        for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
        {
            sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
        }

        d_Dst[i * ROWS_BLOCKDIM_X] = sum;
    }
}
b63c7b237c47efde7dc53555b736bc4c061389c5.cu
//pass
//--gridDim=[24,768] --blockDim=[16,4]

// in host invocation
// assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS);
// assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0);
// assert(imageH % ROWS_BLOCKDIM_Y == 0);

#define KERNEL_RADIUS 8
#define KERNEL_LENGTH (2 * KERNEL_RADIUS + 1)

#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 4
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1

__constant__ float c_Kernel[KERNEL_LENGTH];

__global__ void convolutionRowsKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int pitch
)
{
    __requires(pitch == 3072);
    __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];

    //Offset to the left halo edge
    const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;

    d_Src += baseY * pitch + baseX;
    d_Dst += baseY * pitch + baseX;

    //Load main data
#pragma unroll
    for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
    }

    //Load left halo
#pragma unroll
    for (int i = 0; i < ROWS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
    }

    //Load right halo
#pragma unroll
    for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
    }

    //Compute and store results
    __syncthreads();
#pragma unroll
    for (int i = ROWS_HALO_STEPS;
#define base (baseY * pitch + baseX)
         __invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)%(ROWS_BLOCKDIM_X) == 0)),
         __invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)/(ROWS_BLOCKDIM_X) >= ROWS_HALO_STEPS)),
         __invariant(__write_implies(d_Dst, (__write_offset_bytes(d_Dst)/sizeof(float) - base)/(ROWS_BLOCKDIM_X) < ROWS_HALO_STEPS + ROWS_RESULT_STEPS)),
         i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        float sum = 0;

#pragma unroll
        for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
        {
            sum += c_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
        }

        d_Dst[i * ROWS_BLOCKDIM_X] = sum;
    }
}
316f38be9615b0e4d32f397d41638bd10180afa5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define HISTOGRAM_LENGTH 256

__global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height) {
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int row = by*blockDim.y+ty;
    int col = bx*blockDim.x+tx;
    int index = row*width + col;

    if(row < height && col < width) {
        grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]);
    }
}
316f38be9615b0e4d32f397d41638bd10180afa5.cu
#include "includes.h" #define HISTOGRAM_LENGTH 256 __global__ void convertToGrayScale(unsigned char * ucharImg, unsigned char * grayImg, int width, int height) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by*blockDim.y+ty; int col = bx*blockDim.x+tx; int index = row*width + col; if(row < height && col < width) { grayImg[index] = (unsigned char) (0.21*ucharImg[index*3] + 0.71*ucharImg[index*3 + 1] + 0.07*ucharImg[index*3 + 2]); } }
ececb7e152e90cafd6a60c5e26f26c96be849daf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) "2021, by Centre Européen de Recherche et de Formation Avancée en Calcul Scientifiq
// Developer: Mario Di Renzo
// Affiliation: Centre Européen de Recherche et de Formation Avancée en Calcul Scientifique
// URL: https://cerfacs.fr
// Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020).
//           HTR solver: An open-source exascale-oriented task-based
//           multi-GPU high-order code for hypersonic aerothermodynamics.
//           Computer Physics Communications 255, 107262"
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//    * Redistributions of source code must retain the above copyright
//      notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above copyright
//      notice, this list of conditions and the following disclaimer in the
//      documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "cuda_utils.hpp"

//-----------------------------------------------------------------------------
// These utilities perform butterfly reduction across a DeferedBuffer
//-----------------------------------------------------------------------------

__global__
void ReduceBufferSum_kernel(const Legion::DeferredBuffer<double, 1> buffer,
                            const Legion::DeferredValue<double> result,
                            const size_t size)
{
   // We know there is never more than 32 warps in a CTA
   __shared__ double trampoline[32];
   // Each thread reduces all the correspoinding values
   int offset = threadIdx.x;
   double my_r = 0.0; // Spectral radius cannot be lower than 0
   while (offset < size) {
      my_r += buffer[Legion::Point<1>(offset)];
      offset += blockDim.x;
   }
   // make sure that everyone is done with its reduction
   __syncthreads();

   // Perform a local reduction inside the CTA
   // Butterfly reduction across all threads in all warps
   for (int i = 16; i >= 1; i/=2)
      my_r += __shfl_xor_sync(0xfffffff, my_r, i, 32);
   unsigned laneid;
   asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : );
   unsigned warpid = threadIdx.x >> 5;
   // First thread in each warp writes out all values
   if (laneid == 0)
      trampoline[warpid] = my_r;
   __syncthreads();

   // Butterfly reduction across all threads in the first warp
   if (warpid == 0) {
      unsigned numwarps = blockDim.x >> 5;
      my_r = (laneid < numwarps) ? trampoline[laneid] : 0;
      for (int i = 16; i >= 1; i/=2)
         my_r += __shfl_xor_sync(0xfffffff, my_r, i, 32);
      // First thread writes to the buffer
      if (laneid == 0)
         result.write(my_r);
   }
}

__global__
void ReduceBufferMax_kernel(const Legion::DeferredBuffer<double, 1> buffer,
                            const Legion::DeferredValue<double> result,
                            const size_t size)
{
   // We know there is never more than 32 warps in a CTA
   __shared__ double trampoline[32];
   // Each thread reduces all the correspoinding values
   int offset = threadIdx.x;
   double my_r = 0.0; // Spectral radius cannot be lower than 0
   while (offset < size) {
      my_r = max(my_r, buffer[Legion::Point<1>(offset)]);
      offset += blockDim.x;
   }
   // make sure that everyone is done with its reduction
   __syncthreads();

   // Perform a local reduction inside the CTA
   // Butterfly reduction across all threads in all warps
   for (int i = 16; i >= 1; i/=2)
      my_r = max(my_r, __shfl_xor_sync(0xfffffff, my_r, i, 32));
   unsigned laneid;
   asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : );
   unsigned warpid = threadIdx.x >> 5;
   // First thread in each warp writes out all values
   if (laneid == 0)
      trampoline[warpid] = my_r;
   __syncthreads();

   // Butterfly reduction across all threads in the first warp
   if (warpid == 0) {
      unsigned numwarps = blockDim.x >> 5;
      my_r = (laneid < numwarps) ? trampoline[laneid] : 0;
      for (int i = 16; i >= 1; i/=2)
         my_r = max(my_r, __shfl_xor_sync(0xfffffff, my_r, i, 32));
      // First thread writes to the buffer
      if (laneid == 0)
         result.write(my_r);
   }
}
ececb7e152e90cafd6a60c5e26f26c96be849daf.cu
// Copyright (c) "2021, by Centre Européen de Recherche et de Formation Avancée en Calcul Scientifiq // Developer: Mario Di Renzo // Affiliation: Centre Européen de Recherche et de Formation Avancée en Calcul Scientifique // URL: https://cerfacs.fr // Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020). // HTR solver: An open-source exascale-oriented task-based // multi-GPU high-order code for hypersonic aerothermodynamics. // Computer Physics Communications 255, 107262" // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "cuda_utils.hpp" //----------------------------------------------------------------------------- // These utilities perform butterfly reduction across a DeferedBuffer //----------------------------------------------------------------------------- __global__ void ReduceBufferSum_kernel(const Legion::DeferredBuffer<double, 1> buffer, const Legion::DeferredValue<double> result, const size_t size) { // We know there is never more than 32 warps in a CTA __shared__ double trampoline[32]; // Each thread reduces all the correspoinding values int offset = threadIdx.x; double my_r = 0.0; // Spectral radius cannot be lower than 0 while (offset < size) { my_r += buffer[Legion::Point<1>(offset)]; offset += blockDim.x; } // make sure that everyone is done with its reduction __syncthreads(); // Perform a local reduction inside the CTA // Butterfly reduction across all threads in all warps for (int i = 16; i >= 1; i/=2) my_r += __shfl_xor_sync(0xfffffff, my_r, i, 32); unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // First thread in each warp writes out all values if (laneid == 0) trampoline[warpid] = my_r; __syncthreads(); // Butterfly reduction across all threads in the first warp if (warpid == 0) { unsigned numwarps = blockDim.x >> 5; my_r = (laneid < numwarps) ? 
trampoline[laneid] : 0; for (int i = 16; i >= 1; i/=2) my_r += __shfl_xor_sync(0xfffffff, my_r, i, 32); // First thread writes to the buffer if (laneid == 0) result.write(my_r); } } __global__ void ReduceBufferMax_kernel(const Legion::DeferredBuffer<double, 1> buffer, const Legion::DeferredValue<double> result, const size_t size) { // We know there is never more than 32 warps in a CTA __shared__ double trampoline[32]; // Each thread reduces all the correspoinding values int offset = threadIdx.x; double my_r = 0.0; // Spectral radius cannot be lower than 0 while (offset < size) { my_r = max(my_r, buffer[Legion::Point<1>(offset)]); offset += blockDim.x; } // make sure that everyone is done with its reduction __syncthreads(); // Perform a local reduction inside the CTA // Butterfly reduction across all threads in all warps for (int i = 16; i >= 1; i/=2) my_r = max(my_r, __shfl_xor_sync(0xfffffff, my_r, i, 32)); unsigned laneid; asm volatile("mov.u32 %0, %laneid;" : "=r"(laneid) : ); unsigned warpid = threadIdx.x >> 5; // First thread in each warp writes out all values if (laneid == 0) trampoline[warpid] = my_r; __syncthreads(); // Butterfly reduction across all threads in the first warp if (warpid == 0) { unsigned numwarps = blockDim.x >> 5; my_r = (laneid < numwarps) ? trampoline[laneid] : 0; for (int i = 16; i >= 1; i/=2) my_r = max(my_r, __shfl_xor_sync(0xfffffff, my_r, i, 32)); // First thread writes to the buffer if (laneid == 0) result.write(my_r); } }
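// Hedged illustration (not part of the HTR sources): the same two-stage butterfly
// reduction on a plain double array, without the Legion DeferredBuffer/DeferredValue
// types. The kernel name, variable names, and the single-CTA launch are assumptions
// made for this sketch; it assumes blockDim.x is a multiple of 32.
#include <cuda_runtime.h>

__global__ void blockSum(const double* in, double* out, size_t size)
{
   __shared__ double trampoline[32];           // one slot per warp (max 32 warps/CTA)
   double my_r = 0.0;
   for (size_t i = threadIdx.x; i < size; i += blockDim.x)
      my_r += in[i];                           // each thread accumulates a strided slice
   __syncthreads();

   // Butterfly reduction inside each warp
   for (int i = 16; i >= 1; i /= 2)
      my_r += __shfl_xor_sync(0xffffffff, my_r, i, 32);

   unsigned laneid = threadIdx.x & 31;
   unsigned warpid = threadIdx.x >> 5;
   if (laneid == 0) trampoline[warpid] = my_r; // lane 0 of each warp publishes its sum
   __syncthreads();

   // The first warp reduces the per-warp partial sums
   if (warpid == 0) {
      unsigned numwarps = (blockDim.x + 31) >> 5;
      my_r = (laneid < numwarps) ? trampoline[laneid] : 0.0;
      for (int i = 16; i >= 1; i /= 2)
         my_r += __shfl_xor_sync(0xffffffff, my_r, i, 32);
      if (laneid == 0) *out = my_r;            // single-CTA launch writes the final sum
   }
}

// Typical single-block launch: blockSum<<<1, 256>>>(d_in, d_out, n);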
5fab4dcd1403adf9f66a9589b5bcb75c02d10dbb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock;

__global__ void dot( float *a, float *b, float *c ) {
    __shared__ float cache[threadsPerBlock]; // shared memory is private to a single block
    int i = threadIdx.x + blockIdx.x * blockDim.x; // index into the global arrays
    int j = threadIdx.x;                           // index into this block's shared memory
    cache[j] = a[i]*b[i];
    __syncthreads(); // conceptually: restart the loop over threadIdx

    int k=threadsPerBlock/2;
    while (k>0) {
        if (j<k)
            cache[j]+=cache[j+k];
        __syncthreads(); // conceptually: restart the loop over threadIdx
        k = k/2;
    }

    if (threadIdx.x==0)
        c[blockIdx.x]=cache[0];
}

/*** This can be thought of as:
for (blockIdx.x = 1; blockIdx.x<M; blockIdx.x++) {
    __shared__ float cache[threadsPerBlock];
    for (int threadIdx.x = 1; threadIdx.x<N; threadIdx.x++) {
        cache[threadIdx.x] = a[threadIdx.x + blockIdx.x * blockDim.x] * b[threadIdx.x + blockIdx.x * blockDim.x];
    }
    int k=threadsPerBlock/2;
    while (k>0) {
        for (int threadIdx.x = 1; threadIdx.x<N; threadIdx.x++) {
            if (threadIdx.x<k) cache[threadIdx.x]+=cache[threadIdx.x+k];
        }
        k = k/2;
    }
    c[blockIdx.x]=cache[0];
}
***/
5fab4dcd1403adf9f66a9589b5bcb75c02d10dbb.cu
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock;

__global__ void dot( float *a, float *b, float *c ) {
    __shared__ float cache[threadsPerBlock]; // shared memory is private to a single block
    int i = threadIdx.x + blockIdx.x * blockDim.x; // index into the global arrays
    int j = threadIdx.x;                           // index into this block's shared memory
    cache[j] = a[i]*b[i];
    __syncthreads(); // conceptually: restart the loop over threadIdx

    int k=threadsPerBlock/2;
    while (k>0) {
        if (j<k)
            cache[j]+=cache[j+k];
        __syncthreads(); // conceptually: restart the loop over threadIdx
        k = k/2;
    }

    if (threadIdx.x==0)
        c[blockIdx.x]=cache[0];
}

/*** This can be thought of as:
for (blockIdx.x = 1; blockIdx.x<M; blockIdx.x++) {
    __shared__ float cache[threadsPerBlock];
    for (int threadIdx.x = 1; threadIdx.x<N; threadIdx.x++) {
        cache[threadIdx.x] = a[threadIdx.x + blockIdx.x * blockDim.x] * b[threadIdx.x + blockIdx.x * blockDim.x];
    }
    int k=threadsPerBlock/2;
    while (k>0) {
        for (int threadIdx.x = 1; threadIdx.x<N; threadIdx.x++) {
            if (threadIdx.x<k) cache[threadIdx.x]+=cache[threadIdx.x+k];
        }
        k = k/2;
    }
    c[blockIdx.x]=cache[0];
}
***/
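// A possible host-side driver for the kernel above (illustrative sketch, not part of
// the original file): it fills two vectors, launches dot(), and finishes the per-block
// partial sums on the CPU. The test data and variable names are assumptions.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

int main() {
    float *a, *b, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    a = (float*)malloc(N * sizeof(float));
    b = (float*)malloc(N * sizeof(float));
    partial_c = (float*)malloc(blocksPerGrid * sizeof(float));

    cudaMalloc((void**)&dev_a, N * sizeof(float));
    cudaMalloc((void**)&dev_b, N * sizeof(float));
    cudaMalloc((void**)&dev_partial_c, blocksPerGrid * sizeof(float));

    for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2.0f; }

    cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice);

    dot<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);

    cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float),
               cudaMemcpyDeviceToHost);

    // Final reduction of the per-block results on the host
    double c = 0.0;
    for (int i = 0; i < blocksPerGrid; i++) c += partial_c[i];
    printf("dot product = %f\n", c);

    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_partial_c);
    free(a); free(b); free(partial_c);
    return 0;
}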
265cfb850dc46314e1ae852f40dcbf747885a165.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define _USE_MATH_DEFINES
#include <cmath>
#include <iostream>
#include <string>
#include <stdio.h>

__global__ void Kernel(float *X, float *Y, float *Z, unsigned int dim)
{
    unsigned int idx_X = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int idx_Y = threadIdx.y + blockIdx.y * blockDim.y;
    if (idx_X < dim && idx_Y < dim)
        Z[idx_X*dim + idx_Y] = 20. + X[idx_X]*X[idx_X] + Y[idx_Y]*Y[idx_Y]
                             - 10.*(cosf(2.*3.14*X[idx_X]) + cosf(2.*3.14*Y[idx_Y]));
}

void initialization(const float leftB, const float rightB, float *X, const unsigned int dim)
{
    try
    {
        float step = (rightB - leftB)/(float)dim;
        if (step <= 0) throw "error";
        X[0] = leftB;
        for( unsigned int i=1; i<dim ; i++)
        {
            X[i] = X[i-1] + step;
        }
    }
    catch (...)
    {
        fprintf(stderr, "step failed!");
        exit(1);
    }
}

int main()
{
    int dim = 2048;
    size_t mem_size = sizeof(float)*dim;

    hipError_t cudaStatus;

    float *hostX, *hostY, *hostZ;
    float *devX, *devY, *devZ;
    float rightB, leftB;
    leftB = -5;
    rightB = 5;

    hostX = (float*)malloc(mem_size);
    hostY = (float*)malloc(mem_size);
    hostZ = (float*)malloc(mem_size*dim);

    initialization(leftB, rightB, hostX, dim);
    //memcpy(hostY,hostX,mem_size);

    hipMalloc((void**)&devX, mem_size);
    hipMalloc((void**)&devY, mem_size);
    hipMalloc((void**)&devZ, mem_size*dim);

    hipMemcpy(devX, hostX, mem_size, hipMemcpyHostToDevice);
    hipMemcpy(devY, devX, mem_size, hipMemcpyDeviceToDevice);

    dim3 N_Grid (dim/32,dim/32,1);
    dim3 N_Block (32,32,1);

    hipLaunchKernelGGL(( Kernel) , dim3(N_Grid), dim3(N_Block) , 0, 0, devX,devY,devZ, dim);

    cudaStatus = hipGetLastError();
    if(cudaStatus != hipSuccess)
    {
        printf("Last error: %s\n", hipGetErrorString(cudaStatus));
        return 0;
    }

    hipMemcpy(hostZ, devZ, mem_size*dim, hipMemcpyDeviceToHost);

    for(unsigned int i=0; i<dim*dim; i++)
    {
        std::cout << "i: " << hostZ[i] << std::endl;
    }

    hipFree(devX);
    hipFree(devY);
    hipFree(devZ);

    free(hostX);
    free(hostY);
    free(hostZ);

    return 0;
}
265cfb850dc46314e1ae852f40dcbf747885a165.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #define _USE_MATH_DEFINES #include <cmath> #include <iostream> #include <string> #include <stdio.h> __global__ void Kernel(float *X, float *Y, float *Z) { unsigned int idx_X = threadIdx.x + blockIdx.x * blockDim.x; unsigned int idx_Y = threadIdx.y + blockIdx.y * blockDim.y; Z[idx_X*dim + idx_Y] = 20. + X[idx_X]*X[idx_X] + Y[idx_Y]*Y[idx_Y] - 10.*(cosf(2.*3.14*X[idx_X]) + cosf(2.*3.14*X[idx_X])); } void initialization(const float leftB, const float rightB, float *X, const unsigned int dim) { try { float step = (rightB - leftB)/(float)dim; if (step <= 0) throw "error"; X[0] = leftB; for( unsigned int i=1; i<dim ; i++) { X[i] = X[i-1] + step; } } catch (...) { fprintf(stderr, "step failed!"); exit(1); } } int main() { int dim = 2048; size_t mem_size = sizeof(float)*dim; cudaError cudaStatus; float *hostX, *hostY, *hostZ; float *devX, *devY, *devZ; float rightB, leftB; leftB = -5; rightB = 5; hostX = (float*)malloc(mem_size); hostY = (float*)malloc(mem_size); hostZ = (float*)malloc(mem_size*mem_size); initialization(leftB, rightB, hostX, dim); //memcpy(hostY,hostX,mem_size); cudaMalloc((void**)&devX, mem_size); cudaMalloc((void**)&devY, mem_size); cudaMalloc((void**)&devZ, mem_size*mem_size); cudaMemcpy(devX, hostX, mem_size, cudaMemcpyHostToDevice); cudaMemcpy(devY, devX, mem_size, cudaMemcpyDeviceToDevice); dim3 N_Grid (dim/32,dim/32,1); dim3 N_Block (48,48,1); Kernel <<< N_Grid, N_Block >>> (devX,devY,devZ); cudaStatus = cudaGetLastError(); if(cudaStatus != cudaSuccess) { printf("Last error: %s\n", cudaGetErrorString(cudaStatus)); return 0; } cudaMemcpy(hostZ, devZ, mem_size*mem_size, cudaMemcpyDeviceToHost); for(unsigned int i=0; i<dim*dim; i++) { std::cout << "i: " << hostZ[i] << std::endl; } cudaFree(devX); cudaFree(devY); cudaFree(devZ); free(hostX); free(hostY); free(hostZ); return 0; }
6c75246bfdc45b1150de01989391005b1023682b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <stdio.h>
#define TX 32
#define TY 32
#define LEN 5.f
#define STEPNUMBER 2

// scale coordinates onto [-LEN, LEN]
__device__ float scale(int i, int w) { return 2*LEN*(((1.f*i)/w) - 0.5f); }

// Newton's Method for Finding Roots
__device__ float fx(float x) { return x*x*x - x; }

__device__ float fxprime(float x) { return 3*x*x - 1; }

__device__ float2 newton(float x, float y, int stepNumber) {
  float dx = 0.f;
  for (float step = 0; step < stepNumber; step += 1){
    dx = fxprime(x);
    x += -(y/dx);
    y = fx(x);
  }
  return make_float2(x,y);
}

__device__ unsigned char clip(float x){ return x > 255 ? 255 : (x < 0 ? 0 : x); }

// kernel function to compute decay and shading
__global__ void stabImageKernel(uchar4 *d_out, int w, int h, float p, int s) {
  const int c = blockIdx.x*blockDim.x + threadIdx.x;
  const int r = blockIdx.y*blockDim.y + threadIdx.y;
  if ((c >= w) || (r >= h)) return; // Check if within image bounds
  const int i = c + r*w; // 1D indexing
  const float x0 = scale(c, w);
  const float y0 = scale(r, h);
  const float dist_0 = sqrt(x0*x0);
  const float2 pos = newton(x0, y0, STEPNUMBER);
  const float dist_f = sqrt(pos.x*pos.x);
  // assign colors based on distance from origin
  const float dist_r = dist_f/dist_0;
  d_out[i].x = clip(dist_r*255); // red ~ growth
  d_out[i].y = ((c == w/2) || (r == h/2)) ? 255 : 0; // axes
  d_out[i].z = clip((1/dist_r)*255); // blue - 1/growth
  d_out[i].w = 255;
}

void kernelLauncher(uchar4 *d_out, int w, int h, float p, int s) {
  const dim3 blockSize(TX, TY);
  const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
  hipLaunchKernelGGL(( stabImageKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, p, s);
}
6c75246bfdc45b1150de01989391005b1023682b.cu
#include "kernel.h" #include <stdio.h> #define TX 32 #define TY 32 #define LEN 5.f #define STEPNUMBER 2 // scale coordinates onto [-LEN, LEN] __device__ float scale(int i, int w) { return 2*LEN*(((1.f*i)/w) - 0.5f); } // Newton's Method for Finding Roots __device__ float fx(float x) { return x*x*x - x; } __device__ float fxprime(float x) { return 3*x*x - 1; } __device__ float2 newton(float x, float y, int stepNumber) { float dx = 0.f; for (float step = 0; step < stepNumber; step += 1){ dx = fxprime(x); x += -(y/dx); y = fx(x); } return make_float2(x,y); } __device__ unsigned char clip(float x){ return x > 255 ? 255 : (x < 0 ? 0 : x); } // kernel function to compute decay and shading __global__ void stabImageKernel(uchar4 *d_out, int w, int h, float p, int s) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r = blockIdx.y*blockDim.y + threadIdx.y; if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const float x0 = scale(c, w); const float y0 = scale(r, h); const float dist_0 = sqrt(x0*x0); const float2 pos = newton(x0, y0, STEPNUMBER); const float dist_f = sqrt(pos.x*pos.x); // assign colors based on distance from origin const float dist_r = dist_f/dist_0; d_out[i].x = clip(dist_r*255); // red ~ growth d_out[i].y = ((c == w/2) || (r == h/2)) ? 255 : 0; // axes d_out[i].z = clip((1/dist_r)*255); // blue - 1/growth d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, float p, int s) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); stabImageKernel<<<gridSize, blockSize>>>(d_out, w, h, p, s); }
77cc9f08ac354c3fff141983c9e07818f066e8a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "csv_common.h" #include "csv_gpu.h" #include "datetime.cuh" #include <io/utilities/block_utils.cuh> #include <io/utilities/parsing_utils.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/fixed_point/fixed_point.hpp> #include <cudf/lists/list_view.cuh> #include <cudf/null_mask.hpp> #include <cudf/strings/detail/convert/fixed_point.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/structs/struct_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <io/utilities/trie.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/detail/copy.h> #include <thrust/transform.h> #include <type_traits> using namespace ::cudf::io; using cudf::device_span; namespace cudf { namespace io { namespace csv { namespace gpu { /// Block dimension for dtype detection and conversion kernels constexpr uint32_t csvparse_block_dim = 128; /* * @brief Returns true is the input character is a valid digit. * Supports both decimal and hexadecimal digits (uppercase and lowercase). * * @param c Character to check * @param is_hex Whether to check as a hexadecimal * * @return `true` if it is digit-like, `false` otherwise */ __device__ __inline__ bool is_digit(char c, bool is_hex = false) { if (c >= '0' && c <= '9') return true; if (is_hex) { if (c >= 'A' && c <= 'F') return true; if (c >= 'a' && c <= 'f') return true; } return false; } /* * @brief Checks whether the given character counters indicate a potentially * valid date and/or time field. * * For performance and simplicity, we detect only the most common date * formats. Example formats that are detectable: * * `2001/02/30` * `2001-02-30 00:00:00` * `2/30/2001 T04:05:60.7` * `2 / 1 / 2011` * `02/January` * * @param len Number of non special-symbol or numeric characters * @param decimal_count Number of '.' characters * @param colon_count Number of ':' characters * @param dash_count Number of '-' characters * @param slash_count Number of '/' characters * * @return `true` if it is date-like, `false` otherwise */ __device__ __inline__ bool is_datetime( long len, long decimal_count, long colon_count, long dash_count, long slash_count) { // Must not exceed count of longest month (September) plus `T` time indicator if (len > 10) { return false; } // Must not exceed more than one decimals or more than two time separators if (decimal_count > 1 || colon_count > 2) { return false; } // Must have one or two '-' or '/' but not both as date separators if ((dash_count > 0 && dash_count < 3 && slash_count == 0) || (dash_count == 0 && slash_count > 0 && slash_count < 3)) { return true; } return false; } /* * @brief Returns true if the counters indicate a potentially valid float. 
* False positives are possible because positions are not taken into account. * For example, field "e.123-" would match the pattern. * * @param len Number of non special-symbol or numeric characters * @param digit_count Number of digits characters * @param decimal_count Number of occurrences of the decimal point character * @param thousands_count Number of occurrences of the thousands separator character * @param dash_count Number of '-' characters * @param exponent_count Number of 'e or E' characters * * @return `true` if it is floating point-like, `false` otherwise */ __device__ __inline__ bool is_floatingpoint(long len, long digit_count, long decimal_count, long thousands_count, long dash_count, long exponent_count) { // Can't have more than one exponent and one decimal point if (decimal_count > 1) return false; if (exponent_count > 1) return false; // Without the exponent or a decimal point, this is an integer, not a float if (decimal_count == 0 && exponent_count == 0) return false; // Can only have one '-' per component if (dash_count > 1 + exponent_count) return false; // If anything other than these characters is present, it's not a float if (digit_count + decimal_count + dash_count + exponent_count + thousands_count != len) { return false; } // Needs at least 1 digit, 2 if exponent is present if (digit_count < 1 + exponent_count) return false; return true; } /* * @brief CUDA kernel that parses and converts CSV data into cuDF column data. * * Data is processed in one row/record at a time, so the number of total * threads (tid) is equal to the number of rows. * * @param opts A set of parsing options * @param csv_text The entire CSV data to read * @param column_flags Per-column parsing behavior flags * @param row_offsets The start the CSV data of interest * @param d_column_data The count for each column data type */ __global__ void __launch_bounds__(csvparse_block_dim) data_type_detection(parse_options_view const opts, device_span<char const> csv_text, device_span<column_parse::flags const> const column_flags, device_span<uint64_t const> const row_offsets, device_span<column_type_histogram> d_column_data) { auto const raw_csv = csv_text.data(); // ThreadIds range per block, so also need the blockId // This is entry into the fields; threadId is an element within `num_records` long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x); long const rec_id_next = rec_id + 1; // we can have more threads than data, make sure we are not past the end of // the data if (rec_id_next >= row_offsets.size()) { return; } auto field_start = raw_csv + row_offsets[rec_id]; auto const row_end = raw_csv + row_offsets[rec_id_next]; auto next_field = field_start; int col = 0; int actual_col = 0; // Going through all the columns of a given record while (col < column_flags.size() && field_start <= row_end) { auto next_delimiter = cudf::io::gpu::seek_field_end(field_start, row_end, opts); // Checking if this is a column that the user wants --- user can filter columns if (column_flags[col] & column_parse::enabled) { // points to last character in the field auto const field_len = static_cast<size_t>(next_delimiter - field_start); if (serialized_trie_contains(opts.trie_na, {field_start, field_len})) { atomicAdd(&d_column_data[actual_col].null_count, 1); } else if (serialized_trie_contains(opts.trie_true, {field_start, field_len}) || serialized_trie_contains(opts.trie_false, {field_start, field_len})) { atomicAdd(&d_column_data[actual_col].bool_count, 1); } else if (cudf::io::is_infinity(field_start, 
next_delimiter)) { atomicAdd(&d_column_data[actual_col].float_count, 1); } else { long count_number = 0; long count_decimal = 0; long count_thousands = 0; long count_slash = 0; long count_dash = 0; long count_plus = 0; long count_colon = 0; long count_string = 0; long count_exponent = 0; // Modify field_start & end to ignore whitespace and quotechars // This could possibly result in additional empty fields auto const trimmed_field_range = trim_whitespaces_quotes(field_start, next_delimiter); auto const trimmed_field_len = trimmed_field_range.second - trimmed_field_range.first; for (auto cur = trimmed_field_range.first; cur < trimmed_field_range.second; ++cur) { if (is_digit(*cur)) { count_number++; continue; } if (*cur == opts.decimal) { count_decimal++; continue; } if (*cur == opts.thousands) { count_thousands++; continue; } // Looking for unique characters that will help identify column types. switch (*cur) { case '-': count_dash++; break; case '+': count_plus++; break; case '/': count_slash++; break; case ':': count_colon++; break; case 'e': case 'E': if (cur > trimmed_field_range.first && cur < trimmed_field_range.second - 1) count_exponent++; break; default: count_string++; break; } } // Integers have to have the length of the string // Off by one if they start with a minus sign auto const int_req_number_cnt = trimmed_field_len - count_thousands - ((*trimmed_field_range.first == '-' || *trimmed_field_range.first == '+') && trimmed_field_len > 1); if (column_flags[col] & column_parse::as_datetime) { // PANDAS uses `object` dtype if the date is unparseable if (is_datetime(count_string, count_decimal, count_colon, count_dash, count_slash)) { atomicAdd(&d_column_data[actual_col].datetime_count, 1); } else { atomicAdd(&d_column_data[actual_col].string_count, 1); } } else if (count_number == int_req_number_cnt) { auto const is_negative = (*trimmed_field_range.first == '-'); auto const data_begin = trimmed_field_range.first + (is_negative || (*trimmed_field_range.first == '+')); cudf::size_type* ptr = cudf::io::gpu::infer_integral_field_counter( data_begin, data_begin + count_number, is_negative, d_column_data[actual_col]); atomicAdd(ptr, 1); } else if (is_floatingpoint(trimmed_field_len, count_number, count_decimal, count_thousands, count_dash + count_plus, count_exponent)) { atomicAdd(&d_column_data[actual_col].float_count, 1); } else { atomicAdd(&d_column_data[actual_col].string_count, 1); } } actual_col++; } next_field = next_delimiter + 1; field_start = next_field; col++; } } template <typename T, int base> __inline__ __device__ T decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::io::parse_numeric<T, base>(begin, end, opts); } template <typename T> __inline__ __device__ T decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::io::parse_numeric<T>(begin, end, opts); } template <> __inline__ __device__ cudf::timestamp_D decode_value(char const* begin, char const* end, parse_options_view const& opts) { return timestamp_D{cudf::duration_D{to_date(begin, end, opts.dayfirst)}}; } template <> __inline__ __device__ cudf::timestamp_s decode_value(char const* begin, char const* end, parse_options_view const& opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_s{cudf::duration_s{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_s{cudf::duration_s{milli / 1000}}; } } template <> __inline__ __device__ cudf::timestamp_ms decode_value(char const* 
begin, char const* end, parse_options_view const& opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_ms{cudf::duration_ms{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_ms{cudf::duration_ms{milli}}; } } template <> __inline__ __device__ cudf::timestamp_us decode_value(char const* begin, char const* end, parse_options_view const& opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_us{cudf::duration_us{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_us{cudf::duration_us{milli * 1000}}; } } template <> __inline__ __device__ cudf::timestamp_ns decode_value(char const* begin, char const* end, parse_options_view const& opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_ns{cudf::duration_ns{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_ns{cudf::duration_ns{milli * 1000000}}; } } #ifndef DURATION_DECODE_VALUE #define DURATION_DECODE_VALUE(Type) \ template <> \ __inline__ __device__ Type decode_value( \ const char* begin, const char* end, parse_options_view const& opts) \ { \ return Type{to_time_delta<Type>(begin, end)}; \ } #endif DURATION_DECODE_VALUE(duration_D) DURATION_DECODE_VALUE(duration_s) DURATION_DECODE_VALUE(duration_ms) DURATION_DECODE_VALUE(duration_us) DURATION_DECODE_VALUE(duration_ns) // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::string_view decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::string_view{}; } // The purpose of this is merely to allow compilation ONLY template <> __inline__ __device__ cudf::dictionary32 decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::dictionary32{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::list_view decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::list_view{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::struct_view decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::struct_view{}; } /** * @brief Functor for converting CSV raw data to typed value. */ struct decode_op { /** * @brief Dispatch for numeric types whose values can be convertible to * 0 or 1 to represent boolean false/true, based upon checking against a * true/false values list. * * @return bool Whether the parsed value is valid. */ template <typename T, typename std::enable_if_t<std::is_integral_v<T> and !std::is_same_v<T, bool> and !cudf::is_fixed_point<T>()>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { static_cast<T*>(out_buffer)[row] = [&flags, &opts, begin, end]() -> T { // Check for user-specified true/false values auto const field_len = static_cast<size_t>(end - begin); if (serialized_trie_contains(opts.trie_true, {begin, field_len})) { return 1; } if (serialized_trie_contains(opts.trie_false, {begin, field_len})) { return 0; } return flags & column_parse::as_hexadecimal ? 
decode_value<T, 16>(begin, end, opts) : decode_value<T>(begin, end, opts); }(); return true; } /** * @brief Dispatch for fixed point types. * * @return bool Whether the parsed value is valid. */ template <typename T, typename std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type output_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { static_cast<device_storage_type_t<T>*>(out_buffer)[row] = [&flags, &opts, output_type, begin, end]() -> device_storage_type_t<T> { return strings::detail::parse_decimal<device_storage_type_t<T>>( begin, end, output_type.scale()); }(); return true; } /** * @brief Dispatch for boolean type types. */ template <typename T, typename std::enable_if_t<std::is_same_v<T, bool>>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { static_cast<T*>(out_buffer)[row] = [&opts, begin, end]() { // Check for user-specified true/false values auto const field_len = static_cast<size_t>(end - begin); if (serialized_trie_contains(opts.trie_true, {begin, field_len})) { return true; } if (serialized_trie_contains(opts.trie_false, {begin, field_len})) { return false; } return decode_value<T>(begin, end, opts); }(); return true; } /** * @brief Dispatch for floating points, which are set to NaN if the input * is not valid. In such case, the validity mask is set to zero too. */ template <typename T, typename std::enable_if_t<std::is_floating_point_v<T>>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { T const value = decode_value<T>(begin, end, opts); static_cast<T*>(out_buffer)[row] = value; return !std::isnan(value); } /** * @brief Dispatch for all other types. */ template <typename T, typename std::enable_if_t<!std::is_integral_v<T> and !std::is_floating_point_v<T> and !cudf::is_fixed_point<T>()>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { static_cast<T*>(out_buffer)[row] = decode_value<T>(begin, end, opts); return true; } }; /** * @brief CUDA kernel that parses and converts CSV data into cuDF column data. * * Data is processed one record at a time * * @param[in] options A set of parsing options * @param[in] data The entire CSV data to read * @param[in] column_flags Per-column parsing behavior flags * @param[in] row_offsets The start the CSV data of interest * @param[in] dtypes The data type of the column * @param[out] columns The output column data * @param[out] valids The bitmaps indicating whether column fields are valid */ __global__ void __launch_bounds__(csvparse_block_dim) convert_csv_to_cudf(cudf::io::parse_options_view options, device_span<char const> data, device_span<column_parse::flags const> column_flags, device_span<uint64_t const> row_offsets, device_span<cudf::data_type const> dtypes, device_span<void* const> columns, device_span<cudf::bitmask_type* const> valids) { auto const raw_csv = data.data(); // thread IDs range per block, so also need the block id. 
// this is entry into the field array - tid is an elements within the num_entries array long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x); long const rec_id_next = rec_id + 1; // we can have more threads than data, make sure we are not past the end of // the data if (rec_id_next >= row_offsets.size()) return; auto field_start = raw_csv + row_offsets[rec_id]; auto const row_end = raw_csv + row_offsets[rec_id_next]; auto next_field = field_start; int col = 0; int actual_col = 0; while (col < column_flags.size() && field_start <= row_end) { auto next_delimiter = cudf::io::gpu::seek_field_end(next_field, row_end, options); if (column_flags[col] & column_parse::enabled) { // check if the entire field is a NaN string - consistent with pandas auto const is_valid = !serialized_trie_contains( options.trie_na, {field_start, static_cast<size_t>(next_delimiter - field_start)}); // Modify field_start & end to ignore whitespace and quotechars auto field_end = next_delimiter; if (is_valid && dtypes[actual_col].id() != cudf::type_id::STRING) { auto const trimmed_field = trim_whitespaces_quotes(field_start, field_end, options.quotechar); field_start = trimmed_field.first; field_end = trimmed_field.second; } if (is_valid) { // Type dispatcher does not handle STRING if (dtypes[actual_col].id() == cudf::type_id::STRING) { auto end = next_delimiter; if (options.keepquotes == false) { if ((*field_start == options.quotechar) && (*(end - 1) == options.quotechar)) { ++field_start; --end; } } auto str_list = static_cast<std::pair<const char*, size_t>*>(columns[actual_col]); str_list[rec_id].first = field_start; str_list[rec_id].second = end - field_start; } else { if (cudf::type_dispatcher(dtypes[actual_col], decode_op{}, columns[actual_col], rec_id, dtypes[actual_col], field_start, field_end, options, column_flags[col])) { // set the valid bitmap - all bits were set to 0 to start set_bit(valids[actual_col], rec_id); } } } else if (dtypes[actual_col].id() == cudf::type_id::STRING) { auto str_list = static_cast<std::pair<const char*, size_t>*>(columns[actual_col]); str_list[rec_id].first = nullptr; str_list[rec_id].second = 0; } ++actual_col; } next_field = next_delimiter + 1; field_start = next_field; ++col; } } /* * @brief Merge two packed row contexts (each corresponding to a block of characters) * and return the packed row context corresponding to the merged character block */ inline __device__ packed_rowctx_t merge_row_contexts(packed_rowctx_t first_ctx, packed_rowctx_t second_ctx) { uint32_t id0 = get_row_context(first_ctx, ROW_CTX_NONE) & 3; uint32_t id1 = get_row_context(first_ctx, ROW_CTX_QUOTE) & 3; uint32_t id2 = get_row_context(first_ctx, ROW_CTX_COMMENT) & 3; return (first_ctx & ~pack_row_contexts(3, 3, 3)) + pack_row_contexts(get_row_context(second_ctx, id0), get_row_context(second_ctx, id1), get_row_context(second_ctx, id2)); } /* * @brief Per-character context: * 1-bit count (0 or 1) per context in the lower 4 bits * 2-bit output context id per input context in bits 8..15 */ constexpr __device__ uint32_t make_char_context(uint32_t id0, uint32_t id1, uint32_t id2 = ROW_CTX_COMMENT, uint32_t c0 = 0, uint32_t c1 = 0, uint32_t c2 = 0) { return (id0 << 8) | (id1 << 10) | (id2 << 12) | (ROW_CTX_EOF << 14) | (c0) | (c1 << 1) | (c2 << 2); } /* * @brief Merge a 1-character context to keep track of bitmasks where new rows occur * Merges a single-character "block" row context at position pos with the current * block's row context (the current block contains 32-pos characters) * * @param ctx Current 
block context and new rows bitmaps * @param char_ctx state transitions associated with new character * @param pos Position within the current 32-character block * * NOTE: This is probably the most performance-critical piece of the row gathering kernel. * The char_ctx value should be created via make_char_context, and its value should * have been evaluated at compile-time. */ inline __device__ void merge_char_context(uint4& ctx, uint32_t char_ctx, uint32_t pos) { uint32_t id0 = (ctx.w >> 0) & 3; uint32_t id1 = (ctx.w >> 2) & 3; uint32_t id2 = (ctx.w >> 4) & 3; // Set the newrow bit in the bitmap at the corresponding position ctx.x |= ((char_ctx >> id0) & 1) << pos; ctx.y |= ((char_ctx >> id1) & 1) << pos; ctx.z |= ((char_ctx >> id2) & 1) << pos; // Update the output context ids ctx.w = ((char_ctx >> (8 + id0 * 2)) & 0x03) | ((char_ctx >> (6 + id1 * 2)) & 0x0c) | ((char_ctx >> (4 + id2 * 2)) & 0x30) | (ROW_CTX_EOF << 6); } /* * Convert the context-with-row-bitmaps version to a packed row context */ inline __device__ packed_rowctx_t pack_rowmaps(uint4 ctx_map) { return pack_row_contexts(make_row_context(__popc(ctx_map.x), (ctx_map.w >> 0) & 3), make_row_context(__popc(ctx_map.y), (ctx_map.w >> 2) & 3), make_row_context(__popc(ctx_map.z), (ctx_map.w >> 4) & 3)); } /* * Selects the row bitmap corresponding to the given parser state */ inline __device__ uint32_t select_rowmap(uint4 ctx_map, uint32_t ctxid) { return (ctxid == ROW_CTX_NONE) ? ctx_map.x : (ctxid == ROW_CTX_QUOTE) ? ctx_map.y : (ctxid == ROW_CTX_COMMENT) ? ctx_map.z : 0; } /** * @brief Single pair-wise 512-wide row context merge transform * * Merge row context blocks and record the merge operation in a context * tree so that the transform is reversible. * The tree is organized such that the left and right children of node n * are located at indices n*2 and n*2+1, the root node starting at index 1 * * @tparam lanemask mask to specify source of packed row context * @tparam tmask mask to specify principle thread for merging row context * @tparam base start location for writing into packed row context tree * @tparam level_scale level of the node in the tree * @param ctxtree[out] packed row context tree * @param ctxb[in] packed row context for the current character block * @param t thread id (leaf node id) */ template <uint32_t lanemask, uint32_t tmask, uint32_t base, uint32_t level_scale> inline __device__ void ctx_merge(uint64_t* ctxtree, packed_rowctx_t* ctxb, uint32_t t) { uint64_t tmp = shuffle_xor(*ctxb, lanemask); if (!(t & tmask)) { *ctxb = merge_row_contexts(*ctxb, tmp); ctxtree[base + (t >> level_scale)] = *ctxb; } } /** * @brief Single 512-wide row context inverse merge transform * * Walks the context tree starting from a root node * * @tparam rmask Mask to specify which threads write input row context * @param[in] base Start read location of the merge transform tree * @param[in] ctxtree Merge transform tree * @param[in] ctx Input context * @param[in] brow4 output row in block *4 * @param[in] t thread id (leaf node id) */ template <uint32_t rmask> inline __device__ void ctx_unmerge( uint32_t base, uint64_t* ctxtree, uint32_t* ctx, uint32_t* brow4, uint32_t t) { rowctx32_t ctxb_left, ctxb_right, ctxb_sum; ctxb_sum = get_row_context(ctxtree[base], *ctx); ctxb_left = get_row_context(ctxtree[(base)*2 + 0], *ctx); ctxb_right = get_row_context(ctxtree[(base)*2 + 1], ctxb_left & 3); if (t & (rmask)) { *brow4 += (ctxb_sum & ~3) - (ctxb_right & ~3); *ctx = ctxb_left & 3; } } /* * @brief 512-wide row context merge transform * * 
Repeatedly merge row context blocks, keeping track of each merge operation * in a context tree so that the transform is reversible * The tree is organized such that the left and right children of node n * are located at indices n*2 and n*2+1, the root node starting at index 1 * * Each node contains the counts and output contexts corresponding to the * possible input contexts. * Each parent node's count is obtained by adding the corresponding counts * from the left child node with the right child node's count selected from * the left child node's output context: * parent.count[k] = left.count[k] + right.count[left.outctx[k]] * parent.outctx[k] = right.outctx[left.outctx[k]] * * @param ctxtree[out] packed row context tree * @param ctxb[in] packed row context for the current character block * @param t thread id (leaf node id) */ static inline __device__ void rowctx_merge_transform(uint64_t ctxtree[1024], packed_rowctx_t ctxb, uint32_t t) { ctxtree[512 + t] = ctxb; ctx_merge<1, 0x1, 256, 1>(ctxtree, &ctxb, t); ctx_merge<2, 0x3, 128, 2>(ctxtree, &ctxb, t); ctx_merge<4, 0x7, 64, 3>(ctxtree, &ctxb, t); ctx_merge<8, 0xf, 32, 4>(ctxtree, &ctxb, t); __syncthreads(); if (t < 32) { ctxb = ctxtree[32 + t]; ctx_merge<1, 0x1, 16, 1>(ctxtree, &ctxb, t); ctx_merge<2, 0x3, 8, 2>(ctxtree, &ctxb, t); ctx_merge<4, 0x7, 4, 3>(ctxtree, &ctxb, t); ctx_merge<8, 0xf, 2, 4>(ctxtree, &ctxb, t); // Final stage uint64_t tmp = shuffle_xor(ctxb, 16); if (t == 0) { ctxtree[1] = merge_row_contexts(ctxb, tmp); } } } /* * @brief 512-wide row context inverse merge transform * * Walks the context tree starting from the root node (index 1) using * the starting context in node index 0. * The return value is the starting row and input context for the given leaf node * * @param[in] ctxtree Merge transform tree * @param[in] t thread id (leaf node id) * * @return Final row context and count (row_position*4 + context_id format) */ static inline __device__ rowctx32_t rowctx_inverse_merge_transform(uint64_t ctxtree[1024], uint32_t t) { uint32_t ctx = ctxtree[0] & 3; // Starting input context rowctx32_t brow4 = 0; // output row in block *4 ctx_unmerge<256>(1, ctxtree, &ctx, &brow4, t); ctx_unmerge<128>(2 + (t >> 8), ctxtree, &ctx, &brow4, t); ctx_unmerge<64>(4 + (t >> 7), ctxtree, &ctx, &brow4, t); ctx_unmerge<32>(8 + (t >> 6), ctxtree, &ctx, &brow4, t); ctx_unmerge<16>(16 + (t >> 5), ctxtree, &ctx, &brow4, t); ctx_unmerge<8>(32 + (t >> 4), ctxtree, &ctx, &brow4, t); ctx_unmerge<4>(64 + (t >> 3), ctxtree, &ctx, &brow4, t); ctx_unmerge<2>(128 + (t >> 2), ctxtree, &ctx, &brow4, t); ctx_unmerge<1>(256 + (t >> 1), ctxtree, &ctx, &brow4, t); return brow4 + ctx; } /** * @brief Gather row offsets from CSV character data split into 16KB chunks * * This is done in two phases: the first phase returns the possible row counts * per 16K character block for each possible parsing context at the start of the block, * along with the resulting parsing context at the end of the block. * The caller can then compute the actual parsing context at the beginning of each * individual block and total row count. * The second phase outputs the location of each row in the block, using the parsing * context and initial row counter accumulated from the results of the previous phase. * Row parsing context will be updated after phase 2 such that the value contains * the number of rows starting at byte_range_end or beyond. 
* * @param row_ctx Row parsing context (output of phase 1 or input to phase 2) * @param offsets_out Row offsets (nullptr for phase1, non-null indicates phase 2) * @param data Base pointer of character data (all row offsets are relative to this) * @param chunk_size Total number of characters to parse * @param parse_pos Current parsing position in the file * @param start_offset Position of the start of the character buffer in the file * @param data_size CSV file size * @param byte_range_start Ignore rows starting before this position in the file * @param byte_range_end In phase 2, store the number of rows beyond range in row_ctx * @param skip_rows Number of rows to skip (ignored in phase 1) * @param terminator Line terminator character * @param delimiter Column delimiter character * @param quotechar Quote character * @param escapechar Delimiter escape character * @param commentchar Comment line character (skip rows starting with this character) */ __global__ void __launch_bounds__(rowofs_block_dim) gather_row_offsets_gpu(uint64_t* row_ctx, device_span<uint64_t> offsets_out, device_span<char const> const data, size_t chunk_size, size_t parse_pos, size_t start_offset, size_t data_size, size_t byte_range_start, size_t byte_range_end, size_t skip_rows, int terminator, int delimiter, int quotechar, int escapechar, int commentchar) { auto start = data.begin(); using block_reduce = typename hipcub::BlockReduce<uint32_t, rowofs_block_dim>; __shared__ union { typename block_reduce::TempStorage bk_storage; __align__(8) uint64_t ctxtree[rowofs_block_dim * 2]; } temp_storage; const char* end = start + (min(parse_pos + chunk_size, data_size) - start_offset); uint32_t t = threadIdx.x; size_t block_pos = (parse_pos - start_offset) + blockIdx.x * static_cast<size_t>(rowofs_block_bytes) + t * 32; const char* cur = start + block_pos; // Initial state is neutral context (no state transitions), zero rows uint4 ctx_map = { .x = 0, .y = 0, .z = 0, .w = (ROW_CTX_NONE << 0) | (ROW_CTX_QUOTE << 2) | (ROW_CTX_COMMENT << 4) | (ROW_CTX_EOF << 6)}; int c, c_prev = (cur > start && cur <= end) ? 
cur[-1] : terminator; // Loop through all 32 bytes and keep a bitmask of row starts for each possible input context for (uint32_t pos = 0; pos < 32; pos++, cur++, c_prev = c) { uint32_t ctx; if (cur < end) { c = cur[0]; if (c_prev == terminator) { if (c == commentchar) { // Start of a new comment row ctx = make_char_context(ROW_CTX_COMMENT, ROW_CTX_QUOTE, ROW_CTX_COMMENT, 1, 0, 1); } else if (c == quotechar) { // Quoted string on newrow, or quoted string ending in terminator ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE, ROW_CTX_QUOTE, 1, 0, 1); } else { // Start of a new row unless within a quote ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1); } } else if (c == quotechar) { if (c_prev == delimiter || c_prev == quotechar) { // Quoted string after delimiter, quoted string ending in delimiter, or double-quote ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE); } else { // Closing or ignored quote ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_NONE); } } else { // Neutral character ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE); } } else { const char* data_end = start + data_size - start_offset; if (cur <= end && cur == data_end) { // Add a newline at data end (need the extra row offset to infer length of previous row) ctx = make_char_context(ROW_CTX_EOF, ROW_CTX_EOF, ROW_CTX_EOF, 1, 1, 1); } else { // Pass-through context (beyond chunk_size or data_end) ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_COMMENT); } } // Merge with current context, keeping track of where new rows occur merge_char_context(ctx_map, ctx, pos); } // Eliminate rows that start before byte_range_start if (start_offset + block_pos < byte_range_start) { uint32_t dist_minus1 = min(byte_range_start - (start_offset + block_pos) - 1, UINT64_C(31)); uint32_t mask = 0xfffffffe << dist_minus1; ctx_map.x &= mask; ctx_map.y &= mask; ctx_map.z &= mask; } // Convert the long-form {rowmap,outctx}[inctx] version into packed version // {rowcount,ouctx}[inctx], then merge the row contexts of the 32-character blocks into // a single 16K-character block context rowctx_merge_transform(temp_storage.ctxtree, pack_rowmaps(ctx_map), t); // If this is the second phase, get the block's initial parser state and row counter if (offsets_out.data()) { if (t == 0) { temp_storage.ctxtree[0] = row_ctx[blockIdx.x]; } __syncthreads(); // Walk back the transform tree with the known initial parser state rowctx32_t ctx = rowctx_inverse_merge_transform(temp_storage.ctxtree, t); uint64_t row = (temp_storage.ctxtree[0] >> 2) + (ctx >> 2); uint32_t rows_out_of_range = 0; uint32_t rowmap = select_rowmap(ctx_map, ctx & 3); // Output row positions while (rowmap != 0) { uint32_t pos = __ffs(rowmap); block_pos += pos; if (row >= skip_rows && row - skip_rows < offsets_out.size()) { // Output byte offsets are relative to the base of the input buffer offsets_out[row - skip_rows] = block_pos - 1; rows_out_of_range += (start_offset + block_pos - 1 >= byte_range_end); } row++; rowmap >>= pos; } __syncthreads(); // Return the number of rows out of range rows_out_of_range = block_reduce(temp_storage.bk_storage).Sum(rows_out_of_range); if (t == 0) { row_ctx[blockIdx.x] = rows_out_of_range; } } else { // Just store the row counts and output contexts if (t == 0) { row_ctx[blockIdx.x] = temp_storage.ctxtree[1]; } } } size_t __host__ count_blank_rows(const cudf::io::parse_options_view& opts, device_span<char const> data, device_span<uint64_t const> row_offsets, rmm::cuda_stream_view stream) { const auto newline = 
opts.skipblanklines ? opts.terminator : opts.comment; const auto comment = opts.comment != '\0' ? opts.comment : newline; const auto carriage = (opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment; return thrust::count_if( rmm::exec_policy(stream), row_offsets.begin(), row_offsets.end(), [data = data, newline, comment, carriage] __device__(const uint64_t pos) { return ((pos != data.size()) && (data[pos] == newline || data[pos] == comment || data[pos] == carriage)); }); } device_span<uint64_t> __host__ remove_blank_rows(cudf::io::parse_options_view const& options, device_span<char const> data, device_span<uint64_t> row_offsets, rmm::cuda_stream_view stream) { size_t d_size = data.size(); const auto newline = options.skipblanklines ? options.terminator : options.comment; const auto comment = options.comment != '\0' ? options.comment : newline; const auto carriage = (options.skipblanklines && options.terminator == '\n') ? '\r' : comment; auto new_end = thrust::remove_if( rmm::exec_policy(stream), row_offsets.begin(), row_offsets.end(), [data = data, d_size, newline, comment, carriage] __device__(const uint64_t pos) { return ((pos != d_size) && (data[pos] == newline || data[pos] == comment || data[pos] == carriage)); }); return row_offsets.subspan(0, new_end - row_offsets.begin()); } std::vector<column_type_histogram> detect_column_types( cudf::io::parse_options_view const& options, device_span<char const> const data, device_span<column_parse::flags const> const column_flags, device_span<uint64_t const> const row_starts, size_t const num_active_columns, rmm::cuda_stream_view stream) { // Calculate actual block count to use based on records count const int block_size = csvparse_block_dim; const int grid_size = (row_starts.size() + block_size - 1) / block_size; auto d_stats = detail::make_zeroed_device_uvector_async<column_type_histogram>(num_active_columns, stream); hipLaunchKernelGGL(( data_type_detection), dim3(grid_size), dim3(block_size), 0, stream.value(), options, data, column_flags, row_starts, d_stats); return detail::make_std_vector_sync(d_stats, stream); } void __host__ decode_row_column_data(cudf::io::parse_options_view const& options, device_span<char const> data, device_span<column_parse::flags const> column_flags, device_span<uint64_t const> row_offsets, device_span<cudf::data_type const> dtypes, device_span<void* const> columns, device_span<cudf::bitmask_type* const> valids, rmm::cuda_stream_view stream) { // Calculate actual block count to use based on records count auto const block_size = csvparse_block_dim; auto const num_rows = row_offsets.size() - 1; auto const grid_size = (num_rows + block_size - 1) / block_size; hipLaunchKernelGGL(( convert_csv_to_cudf), dim3(grid_size), dim3(block_size), 0, stream.value(), options, data, column_flags, row_offsets, dtypes, columns, valids); } uint32_t __host__ gather_row_offsets(const parse_options_view& options, uint64_t* row_ctx, device_span<uint64_t> const offsets_out, device_span<char const> const data, size_t chunk_size, size_t parse_pos, size_t start_offset, size_t data_size, size_t byte_range_start, size_t byte_range_end, size_t skip_rows, rmm::cuda_stream_view stream) { uint32_t dim_grid = 1 + (chunk_size / rowofs_block_bytes); hipLaunchKernelGGL(( gather_row_offsets_gpu), dim3(dim_grid), dim3(rowofs_block_dim), 0, stream.value(), row_ctx, offsets_out, data, chunk_size, parse_pos, start_offset, data_size, byte_range_start, byte_range_end, skip_rows, options.terminator, options.delimiter, (options.quotechar) ? 
options.quotechar : 0x100, /*(options.escapechar) ? options.escapechar :*/ 0x100, (options.comment) ? options.comment : 0x100); return dim_grid; } } // namespace gpu } // namespace csv } // namespace io } // namespace cudf
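// Hedged illustration (not cuDF code): a host-only version of the character-counting
// heuristic documented above for float detection, showing how a field such as
// "1,234.5e-6" would be classified. The function and variable names are assumptions,
// not cuDF API; the rules mirror the device-side is_floatingpoint() described above.
#include <cctype>
#include <cstring>

bool looks_like_float(const char* s, char decimal = '.', char thousands = ',')
{
  long len = (long)strlen(s);
  long digits = 0, decimals = 0, thous = 0, signs = 0, exps = 0;
  for (const char* c = s; *c; ++c) {
    if (isdigit((unsigned char)*c)) { ++digits;   continue; }
    if (*c == decimal)              { ++decimals; continue; }
    if (*c == thousands)            { ++thous;    continue; }
    if (*c == '-' || *c == '+')     { ++signs;    continue; }
    // 'e'/'E' counts as an exponent only in an interior position
    if ((*c == 'e' || *c == 'E') && c != s && *(c + 1) != '\0') ++exps;
  }
  // At most one decimal point and one exponent, and at least one of them present
  if (decimals > 1 || exps > 1) return false;
  if (decimals == 0 && exps == 0) return false;
  // One sign per component, no characters outside the counted set,
  // and at least one digit (two if an exponent is present)
  if (signs > 1 + exps) return false;
  if (digits + decimals + signs + exps + thous != len) return false;
  return digits >= 1 + exps;
}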
77cc9f08ac354c3fff141983c9e07818f066e8a3.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "csv_common.h" #include "csv_gpu.h" #include "datetime.cuh" #include <io/utilities/block_utils.cuh> #include <io/utilities/parsing_utils.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/fixed_point/fixed_point.hpp> #include <cudf/lists/list_view.cuh> #include <cudf/null_mask.hpp> #include <cudf/strings/detail/convert/fixed_point.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/structs/struct_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/span.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <io/utilities/trie.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/detail/copy.h> #include <thrust/transform.h> #include <type_traits> using namespace ::cudf::io; using cudf::device_span; namespace cudf { namespace io { namespace csv { namespace gpu { /// Block dimension for dtype detection and conversion kernels constexpr uint32_t csvparse_block_dim = 128; /* * @brief Returns true is the input character is a valid digit. * Supports both decimal and hexadecimal digits (uppercase and lowercase). * * @param c Character to check * @param is_hex Whether to check as a hexadecimal * * @return `true` if it is digit-like, `false` otherwise */ __device__ __inline__ bool is_digit(char c, bool is_hex = false) { if (c >= '0' && c <= '9') return true; if (is_hex) { if (c >= 'A' && c <= 'F') return true; if (c >= 'a' && c <= 'f') return true; } return false; } /* * @brief Checks whether the given character counters indicate a potentially * valid date and/or time field. * * For performance and simplicity, we detect only the most common date * formats. Example formats that are detectable: * * `2001/02/30` * `2001-02-30 00:00:00` * `2/30/2001 T04:05:60.7` * `2 / 1 / 2011` * `02/January` * * @param len Number of non special-symbol or numeric characters * @param decimal_count Number of '.' characters * @param colon_count Number of ':' characters * @param dash_count Number of '-' characters * @param slash_count Number of '/' characters * * @return `true` if it is date-like, `false` otherwise */ __device__ __inline__ bool is_datetime( long len, long decimal_count, long colon_count, long dash_count, long slash_count) { // Must not exceed count of longest month (September) plus `T` time indicator if (len > 10) { return false; } // Must not exceed more than one decimals or more than two time separators if (decimal_count > 1 || colon_count > 2) { return false; } // Must have one or two '-' or '/' but not both as date separators if ((dash_count > 0 && dash_count < 3 && slash_count == 0) || (dash_count == 0 && slash_count > 0 && slash_count < 3)) { return true; } return false; } /* * @brief Returns true if the counters indicate a potentially valid float. * False positives are possible because positions are not taken into account. 
* For example, field "e.123-" would match the pattern. * * @param len Number of non special-symbol or numeric characters * @param digit_count Number of digits characters * @param decimal_count Number of occurrences of the decimal point character * @param thousands_count Number of occurrences of the thousands separator character * @param dash_count Number of '-' characters * @param exponent_count Number of 'e or E' characters * * @return `true` if it is floating point-like, `false` otherwise */ __device__ __inline__ bool is_floatingpoint(long len, long digit_count, long decimal_count, long thousands_count, long dash_count, long exponent_count) { // Can't have more than one exponent and one decimal point if (decimal_count > 1) return false; if (exponent_count > 1) return false; // Without the exponent or a decimal point, this is an integer, not a float if (decimal_count == 0 && exponent_count == 0) return false; // Can only have one '-' per component if (dash_count > 1 + exponent_count) return false; // If anything other than these characters is present, it's not a float if (digit_count + decimal_count + dash_count + exponent_count + thousands_count != len) { return false; } // Needs at least 1 digit, 2 if exponent is present if (digit_count < 1 + exponent_count) return false; return true; } /* * @brief CUDA kernel that parses and converts CSV data into cuDF column data. * * Data is processed in one row/record at a time, so the number of total * threads (tid) is equal to the number of rows. * * @param opts A set of parsing options * @param csv_text The entire CSV data to read * @param column_flags Per-column parsing behavior flags * @param row_offsets The start the CSV data of interest * @param d_column_data The count for each column data type */ __global__ void __launch_bounds__(csvparse_block_dim) data_type_detection(parse_options_view const opts, device_span<char const> csv_text, device_span<column_parse::flags const> const column_flags, device_span<uint64_t const> const row_offsets, device_span<column_type_histogram> d_column_data) { auto const raw_csv = csv_text.data(); // ThreadIds range per block, so also need the blockId // This is entry into the fields; threadId is an element within `num_records` long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x); long const rec_id_next = rec_id + 1; // we can have more threads than data, make sure we are not past the end of // the data if (rec_id_next >= row_offsets.size()) { return; } auto field_start = raw_csv + row_offsets[rec_id]; auto const row_end = raw_csv + row_offsets[rec_id_next]; auto next_field = field_start; int col = 0; int actual_col = 0; // Going through all the columns of a given record while (col < column_flags.size() && field_start <= row_end) { auto next_delimiter = cudf::io::gpu::seek_field_end(field_start, row_end, opts); // Checking if this is a column that the user wants --- user can filter columns if (column_flags[col] & column_parse::enabled) { // points to last character in the field auto const field_len = static_cast<size_t>(next_delimiter - field_start); if (serialized_trie_contains(opts.trie_na, {field_start, field_len})) { atomicAdd(&d_column_data[actual_col].null_count, 1); } else if (serialized_trie_contains(opts.trie_true, {field_start, field_len}) || serialized_trie_contains(opts.trie_false, {field_start, field_len})) { atomicAdd(&d_column_data[actual_col].bool_count, 1); } else if (cudf::io::is_infinity(field_start, next_delimiter)) { atomicAdd(&d_column_data[actual_col].float_count, 1); } else { long 
count_number = 0; long count_decimal = 0; long count_thousands = 0; long count_slash = 0; long count_dash = 0; long count_plus = 0; long count_colon = 0; long count_string = 0; long count_exponent = 0; // Modify field_start & end to ignore whitespace and quotechars // This could possibly result in additional empty fields auto const trimmed_field_range = trim_whitespaces_quotes(field_start, next_delimiter); auto const trimmed_field_len = trimmed_field_range.second - trimmed_field_range.first; for (auto cur = trimmed_field_range.first; cur < trimmed_field_range.second; ++cur) { if (is_digit(*cur)) { count_number++; continue; } if (*cur == opts.decimal) { count_decimal++; continue; } if (*cur == opts.thousands) { count_thousands++; continue; } // Looking for unique characters that will help identify column types. switch (*cur) { case '-': count_dash++; break; case '+': count_plus++; break; case '/': count_slash++; break; case ':': count_colon++; break; case 'e': case 'E': if (cur > trimmed_field_range.first && cur < trimmed_field_range.second - 1) count_exponent++; break; default: count_string++; break; } } // Integers have to have the length of the string // Off by one if they start with a minus sign auto const int_req_number_cnt = trimmed_field_len - count_thousands - ((*trimmed_field_range.first == '-' || *trimmed_field_range.first == '+') && trimmed_field_len > 1); if (column_flags[col] & column_parse::as_datetime) { // PANDAS uses `object` dtype if the date is unparseable if (is_datetime(count_string, count_decimal, count_colon, count_dash, count_slash)) { atomicAdd(&d_column_data[actual_col].datetime_count, 1); } else { atomicAdd(&d_column_data[actual_col].string_count, 1); } } else if (count_number == int_req_number_cnt) { auto const is_negative = (*trimmed_field_range.first == '-'); auto const data_begin = trimmed_field_range.first + (is_negative || (*trimmed_field_range.first == '+')); cudf::size_type* ptr = cudf::io::gpu::infer_integral_field_counter( data_begin, data_begin + count_number, is_negative, d_column_data[actual_col]); atomicAdd(ptr, 1); } else if (is_floatingpoint(trimmed_field_len, count_number, count_decimal, count_thousands, count_dash + count_plus, count_exponent)) { atomicAdd(&d_column_data[actual_col].float_count, 1); } else { atomicAdd(&d_column_data[actual_col].string_count, 1); } } actual_col++; } next_field = next_delimiter + 1; field_start = next_field; col++; } } template <typename T, int base> __inline__ __device__ T decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::io::parse_numeric<T, base>(begin, end, opts); } template <typename T> __inline__ __device__ T decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::io::parse_numeric<T>(begin, end, opts); } template <> __inline__ __device__ cudf::timestamp_D decode_value(char const* begin, char const* end, parse_options_view const& opts) { return timestamp_D{cudf::duration_D{to_date(begin, end, opts.dayfirst)}}; } template <> __inline__ __device__ cudf::timestamp_s decode_value(char const* begin, char const* end, parse_options_view const& opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_s{cudf::duration_s{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_s{cudf::duration_s{milli / 1000}}; } } template <> __inline__ __device__ cudf::timestamp_ms decode_value(char const* begin, char const* end, parse_options_view const& opts) { auto milli = 
to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_ms{cudf::duration_ms{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_ms{cudf::duration_ms{milli}}; } } template <> __inline__ __device__ cudf::timestamp_us decode_value(char const* begin, char const* end, parse_options_view const& opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_us{cudf::duration_us{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_us{cudf::duration_us{milli * 1000}}; } } template <> __inline__ __device__ cudf::timestamp_ns decode_value(char const* begin, char const* end, parse_options_view const& opts) { auto milli = to_date_time(begin, end, opts.dayfirst); if (milli == -1) { return timestamp_ns{cudf::duration_ns{to_non_negative_integer<int64_t>(begin, end)}}; } else { return timestamp_ns{cudf::duration_ns{milli * 1000000}}; } } #ifndef DURATION_DECODE_VALUE #define DURATION_DECODE_VALUE(Type) \ template <> \ __inline__ __device__ Type decode_value( \ const char* begin, const char* end, parse_options_view const& opts) \ { \ return Type{to_time_delta<Type>(begin, end)}; \ } #endif DURATION_DECODE_VALUE(duration_D) DURATION_DECODE_VALUE(duration_s) DURATION_DECODE_VALUE(duration_ms) DURATION_DECODE_VALUE(duration_us) DURATION_DECODE_VALUE(duration_ns) // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::string_view decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::string_view{}; } // The purpose of this is merely to allow compilation ONLY template <> __inline__ __device__ cudf::dictionary32 decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::dictionary32{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::list_view decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::list_view{}; } // The purpose of this is merely to allow compilation ONLY // TODO : make this work for csv template <> __inline__ __device__ cudf::struct_view decode_value(char const* begin, char const* end, parse_options_view const& opts) { return cudf::struct_view{}; } /** * @brief Functor for converting CSV raw data to typed value. */ struct decode_op { /** * @brief Dispatch for numeric types whose values can be convertible to * 0 or 1 to represent boolean false/true, based upon checking against a * true/false values list. * * @return bool Whether the parsed value is valid. */ template <typename T, typename std::enable_if_t<std::is_integral_v<T> and !std::is_same_v<T, bool> and !cudf::is_fixed_point<T>()>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { static_cast<T*>(out_buffer)[row] = [&flags, &opts, begin, end]() -> T { // Check for user-specified true/false values auto const field_len = static_cast<size_t>(end - begin); if (serialized_trie_contains(opts.trie_true, {begin, field_len})) { return 1; } if (serialized_trie_contains(opts.trie_false, {begin, field_len})) { return 0; } return flags & column_parse::as_hexadecimal ? decode_value<T, 16>(begin, end, opts) : decode_value<T>(begin, end, opts); }(); return true; } /** * @brief Dispatch for fixed point types. 
* * @return bool Whether the parsed value is valid. */ template <typename T, typename std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type output_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { static_cast<device_storage_type_t<T>*>(out_buffer)[row] = [&flags, &opts, output_type, begin, end]() -> device_storage_type_t<T> { return strings::detail::parse_decimal<device_storage_type_t<T>>( begin, end, output_type.scale()); }(); return true; } /** * @brief Dispatch for boolean type types. */ template <typename T, typename std::enable_if_t<std::is_same_v<T, bool>>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { static_cast<T*>(out_buffer)[row] = [&opts, begin, end]() { // Check for user-specified true/false values auto const field_len = static_cast<size_t>(end - begin); if (serialized_trie_contains(opts.trie_true, {begin, field_len})) { return true; } if (serialized_trie_contains(opts.trie_false, {begin, field_len})) { return false; } return decode_value<T>(begin, end, opts); }(); return true; } /** * @brief Dispatch for floating points, which are set to NaN if the input * is not valid. In such case, the validity mask is set to zero too. */ template <typename T, typename std::enable_if_t<std::is_floating_point_v<T>>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { T const value = decode_value<T>(begin, end, opts); static_cast<T*>(out_buffer)[row] = value; return !std::isnan(value); } /** * @brief Dispatch for all other types. */ template <typename T, typename std::enable_if_t<!std::is_integral_v<T> and !std::is_floating_point_v<T> and !cudf::is_fixed_point<T>()>* = nullptr> __host__ __device__ __forceinline__ bool operator()(void* out_buffer, size_t row, const data_type, char const* begin, char const* end, parse_options_view const& opts, column_parse::flags flags) { static_cast<T*>(out_buffer)[row] = decode_value<T>(begin, end, opts); return true; } }; /** * @brief CUDA kernel that parses and converts CSV data into cuDF column data. * * Data is processed one record at a time * * @param[in] options A set of parsing options * @param[in] data The entire CSV data to read * @param[in] column_flags Per-column parsing behavior flags * @param[in] row_offsets The start the CSV data of interest * @param[in] dtypes The data type of the column * @param[out] columns The output column data * @param[out] valids The bitmaps indicating whether column fields are valid */ __global__ void __launch_bounds__(csvparse_block_dim) convert_csv_to_cudf(cudf::io::parse_options_view options, device_span<char const> data, device_span<column_parse::flags const> column_flags, device_span<uint64_t const> row_offsets, device_span<cudf::data_type const> dtypes, device_span<void* const> columns, device_span<cudf::bitmask_type* const> valids) { auto const raw_csv = data.data(); // thread IDs range per block, so also need the block id. 
// this is entry into the field array - tid is an elements within the num_entries array long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x); long const rec_id_next = rec_id + 1; // we can have more threads than data, make sure we are not past the end of // the data if (rec_id_next >= row_offsets.size()) return; auto field_start = raw_csv + row_offsets[rec_id]; auto const row_end = raw_csv + row_offsets[rec_id_next]; auto next_field = field_start; int col = 0; int actual_col = 0; while (col < column_flags.size() && field_start <= row_end) { auto next_delimiter = cudf::io::gpu::seek_field_end(next_field, row_end, options); if (column_flags[col] & column_parse::enabled) { // check if the entire field is a NaN string - consistent with pandas auto const is_valid = !serialized_trie_contains( options.trie_na, {field_start, static_cast<size_t>(next_delimiter - field_start)}); // Modify field_start & end to ignore whitespace and quotechars auto field_end = next_delimiter; if (is_valid && dtypes[actual_col].id() != cudf::type_id::STRING) { auto const trimmed_field = trim_whitespaces_quotes(field_start, field_end, options.quotechar); field_start = trimmed_field.first; field_end = trimmed_field.second; } if (is_valid) { // Type dispatcher does not handle STRING if (dtypes[actual_col].id() == cudf::type_id::STRING) { auto end = next_delimiter; if (options.keepquotes == false) { if ((*field_start == options.quotechar) && (*(end - 1) == options.quotechar)) { ++field_start; --end; } } auto str_list = static_cast<std::pair<const char*, size_t>*>(columns[actual_col]); str_list[rec_id].first = field_start; str_list[rec_id].second = end - field_start; } else { if (cudf::type_dispatcher(dtypes[actual_col], decode_op{}, columns[actual_col], rec_id, dtypes[actual_col], field_start, field_end, options, column_flags[col])) { // set the valid bitmap - all bits were set to 0 to start set_bit(valids[actual_col], rec_id); } } } else if (dtypes[actual_col].id() == cudf::type_id::STRING) { auto str_list = static_cast<std::pair<const char*, size_t>*>(columns[actual_col]); str_list[rec_id].first = nullptr; str_list[rec_id].second = 0; } ++actual_col; } next_field = next_delimiter + 1; field_start = next_field; ++col; } } /* * @brief Merge two packed row contexts (each corresponding to a block of characters) * and return the packed row context corresponding to the merged character block */ inline __device__ packed_rowctx_t merge_row_contexts(packed_rowctx_t first_ctx, packed_rowctx_t second_ctx) { uint32_t id0 = get_row_context(first_ctx, ROW_CTX_NONE) & 3; uint32_t id1 = get_row_context(first_ctx, ROW_CTX_QUOTE) & 3; uint32_t id2 = get_row_context(first_ctx, ROW_CTX_COMMENT) & 3; return (first_ctx & ~pack_row_contexts(3, 3, 3)) + pack_row_contexts(get_row_context(second_ctx, id0), get_row_context(second_ctx, id1), get_row_context(second_ctx, id2)); } /* * @brief Per-character context: * 1-bit count (0 or 1) per context in the lower 4 bits * 2-bit output context id per input context in bits 8..15 */ constexpr __device__ uint32_t make_char_context(uint32_t id0, uint32_t id1, uint32_t id2 = ROW_CTX_COMMENT, uint32_t c0 = 0, uint32_t c1 = 0, uint32_t c2 = 0) { return (id0 << 8) | (id1 << 10) | (id2 << 12) | (ROW_CTX_EOF << 14) | (c0) | (c1 << 1) | (c2 << 2); } /* * @brief Merge a 1-character context to keep track of bitmasks where new rows occur * Merges a single-character "block" row context at position pos with the current * block's row context (the current block contains 32-pos characters) * * @param ctx Current 
block context and new rows bitmaps * @param char_ctx state transitions associated with new character * @param pos Position within the current 32-character block * * NOTE: This is probably the most performance-critical piece of the row gathering kernel. * The char_ctx value should be created via make_char_context, and its value should * have been evaluated at compile-time. */ inline __device__ void merge_char_context(uint4& ctx, uint32_t char_ctx, uint32_t pos) { uint32_t id0 = (ctx.w >> 0) & 3; uint32_t id1 = (ctx.w >> 2) & 3; uint32_t id2 = (ctx.w >> 4) & 3; // Set the newrow bit in the bitmap at the corresponding position ctx.x |= ((char_ctx >> id0) & 1) << pos; ctx.y |= ((char_ctx >> id1) & 1) << pos; ctx.z |= ((char_ctx >> id2) & 1) << pos; // Update the output context ids ctx.w = ((char_ctx >> (8 + id0 * 2)) & 0x03) | ((char_ctx >> (6 + id1 * 2)) & 0x0c) | ((char_ctx >> (4 + id2 * 2)) & 0x30) | (ROW_CTX_EOF << 6); } /* * Convert the context-with-row-bitmaps version to a packed row context */ inline __device__ packed_rowctx_t pack_rowmaps(uint4 ctx_map) { return pack_row_contexts(make_row_context(__popc(ctx_map.x), (ctx_map.w >> 0) & 3), make_row_context(__popc(ctx_map.y), (ctx_map.w >> 2) & 3), make_row_context(__popc(ctx_map.z), (ctx_map.w >> 4) & 3)); } /* * Selects the row bitmap corresponding to the given parser state */ inline __device__ uint32_t select_rowmap(uint4 ctx_map, uint32_t ctxid) { return (ctxid == ROW_CTX_NONE) ? ctx_map.x : (ctxid == ROW_CTX_QUOTE) ? ctx_map.y : (ctxid == ROW_CTX_COMMENT) ? ctx_map.z : 0; } /** * @brief Single pair-wise 512-wide row context merge transform * * Merge row context blocks and record the merge operation in a context * tree so that the transform is reversible. * The tree is organized such that the left and right children of node n * are located at indices n*2 and n*2+1, the root node starting at index 1 * * @tparam lanemask mask to specify source of packed row context * @tparam tmask mask to specify principle thread for merging row context * @tparam base start location for writing into packed row context tree * @tparam level_scale level of the node in the tree * @param ctxtree[out] packed row context tree * @param ctxb[in] packed row context for the current character block * @param t thread id (leaf node id) */ template <uint32_t lanemask, uint32_t tmask, uint32_t base, uint32_t level_scale> inline __device__ void ctx_merge(uint64_t* ctxtree, packed_rowctx_t* ctxb, uint32_t t) { uint64_t tmp = shuffle_xor(*ctxb, lanemask); if (!(t & tmask)) { *ctxb = merge_row_contexts(*ctxb, tmp); ctxtree[base + (t >> level_scale)] = *ctxb; } } /** * @brief Single 512-wide row context inverse merge transform * * Walks the context tree starting from a root node * * @tparam rmask Mask to specify which threads write input row context * @param[in] base Start read location of the merge transform tree * @param[in] ctxtree Merge transform tree * @param[in] ctx Input context * @param[in] brow4 output row in block *4 * @param[in] t thread id (leaf node id) */ template <uint32_t rmask> inline __device__ void ctx_unmerge( uint32_t base, uint64_t* ctxtree, uint32_t* ctx, uint32_t* brow4, uint32_t t) { rowctx32_t ctxb_left, ctxb_right, ctxb_sum; ctxb_sum = get_row_context(ctxtree[base], *ctx); ctxb_left = get_row_context(ctxtree[(base)*2 + 0], *ctx); ctxb_right = get_row_context(ctxtree[(base)*2 + 1], ctxb_left & 3); if (t & (rmask)) { *brow4 += (ctxb_sum & ~3) - (ctxb_right & ~3); *ctx = ctxb_left & 3; } } /* * @brief 512-wide row context merge transform * * 
Repeatedly merge row context blocks, keeping track of each merge operation * in a context tree so that the transform is reversible * The tree is organized such that the left and right children of node n * are located at indices n*2 and n*2+1, the root node starting at index 1 * * Each node contains the counts and output contexts corresponding to the * possible input contexts. * Each parent node's count is obtained by adding the corresponding counts * from the left child node with the right child node's count selected from * the left child node's output context: * parent.count[k] = left.count[k] + right.count[left.outctx[k]] * parent.outctx[k] = right.outctx[left.outctx[k]] * * @param ctxtree[out] packed row context tree * @param ctxb[in] packed row context for the current character block * @param t thread id (leaf node id) */ static inline __device__ void rowctx_merge_transform(uint64_t ctxtree[1024], packed_rowctx_t ctxb, uint32_t t) { ctxtree[512 + t] = ctxb; ctx_merge<1, 0x1, 256, 1>(ctxtree, &ctxb, t); ctx_merge<2, 0x3, 128, 2>(ctxtree, &ctxb, t); ctx_merge<4, 0x7, 64, 3>(ctxtree, &ctxb, t); ctx_merge<8, 0xf, 32, 4>(ctxtree, &ctxb, t); __syncthreads(); if (t < 32) { ctxb = ctxtree[32 + t]; ctx_merge<1, 0x1, 16, 1>(ctxtree, &ctxb, t); ctx_merge<2, 0x3, 8, 2>(ctxtree, &ctxb, t); ctx_merge<4, 0x7, 4, 3>(ctxtree, &ctxb, t); ctx_merge<8, 0xf, 2, 4>(ctxtree, &ctxb, t); // Final stage uint64_t tmp = shuffle_xor(ctxb, 16); if (t == 0) { ctxtree[1] = merge_row_contexts(ctxb, tmp); } } } /* * @brief 512-wide row context inverse merge transform * * Walks the context tree starting from the root node (index 1) using * the starting context in node index 0. * The return value is the starting row and input context for the given leaf node * * @param[in] ctxtree Merge transform tree * @param[in] t thread id (leaf node id) * * @return Final row context and count (row_position*4 + context_id format) */ static inline __device__ rowctx32_t rowctx_inverse_merge_transform(uint64_t ctxtree[1024], uint32_t t) { uint32_t ctx = ctxtree[0] & 3; // Starting input context rowctx32_t brow4 = 0; // output row in block *4 ctx_unmerge<256>(1, ctxtree, &ctx, &brow4, t); ctx_unmerge<128>(2 + (t >> 8), ctxtree, &ctx, &brow4, t); ctx_unmerge<64>(4 + (t >> 7), ctxtree, &ctx, &brow4, t); ctx_unmerge<32>(8 + (t >> 6), ctxtree, &ctx, &brow4, t); ctx_unmerge<16>(16 + (t >> 5), ctxtree, &ctx, &brow4, t); ctx_unmerge<8>(32 + (t >> 4), ctxtree, &ctx, &brow4, t); ctx_unmerge<4>(64 + (t >> 3), ctxtree, &ctx, &brow4, t); ctx_unmerge<2>(128 + (t >> 2), ctxtree, &ctx, &brow4, t); ctx_unmerge<1>(256 + (t >> 1), ctxtree, &ctx, &brow4, t); return brow4 + ctx; } /** * @brief Gather row offsets from CSV character data split into 16KB chunks * * This is done in two phases: the first phase returns the possible row counts * per 16K character block for each possible parsing context at the start of the block, * along with the resulting parsing context at the end of the block. * The caller can then compute the actual parsing context at the beginning of each * individual block and total row count. * The second phase outputs the location of each row in the block, using the parsing * context and initial row counter accumulated from the results of the previous phase. * Row parsing context will be updated after phase 2 such that the value contains * the number of rows starting at byte_range_end or beyond. 
* * @param row_ctx Row parsing context (output of phase 1 or input to phase 2) * @param offsets_out Row offsets (nullptr for phase1, non-null indicates phase 2) * @param data Base pointer of character data (all row offsets are relative to this) * @param chunk_size Total number of characters to parse * @param parse_pos Current parsing position in the file * @param start_offset Position of the start of the character buffer in the file * @param data_size CSV file size * @param byte_range_start Ignore rows starting before this position in the file * @param byte_range_end In phase 2, store the number of rows beyond range in row_ctx * @param skip_rows Number of rows to skip (ignored in phase 1) * @param terminator Line terminator character * @param delimiter Column delimiter character * @param quotechar Quote character * @param escapechar Delimiter escape character * @param commentchar Comment line character (skip rows starting with this character) */ __global__ void __launch_bounds__(rowofs_block_dim) gather_row_offsets_gpu(uint64_t* row_ctx, device_span<uint64_t> offsets_out, device_span<char const> const data, size_t chunk_size, size_t parse_pos, size_t start_offset, size_t data_size, size_t byte_range_start, size_t byte_range_end, size_t skip_rows, int terminator, int delimiter, int quotechar, int escapechar, int commentchar) { auto start = data.begin(); using block_reduce = typename cub::BlockReduce<uint32_t, rowofs_block_dim>; __shared__ union { typename block_reduce::TempStorage bk_storage; __align__(8) uint64_t ctxtree[rowofs_block_dim * 2]; } temp_storage; const char* end = start + (min(parse_pos + chunk_size, data_size) - start_offset); uint32_t t = threadIdx.x; size_t block_pos = (parse_pos - start_offset) + blockIdx.x * static_cast<size_t>(rowofs_block_bytes) + t * 32; const char* cur = start + block_pos; // Initial state is neutral context (no state transitions), zero rows uint4 ctx_map = { .x = 0, .y = 0, .z = 0, .w = (ROW_CTX_NONE << 0) | (ROW_CTX_QUOTE << 2) | (ROW_CTX_COMMENT << 4) | (ROW_CTX_EOF << 6)}; int c, c_prev = (cur > start && cur <= end) ? 
cur[-1] : terminator; // Loop through all 32 bytes and keep a bitmask of row starts for each possible input context for (uint32_t pos = 0; pos < 32; pos++, cur++, c_prev = c) { uint32_t ctx; if (cur < end) { c = cur[0]; if (c_prev == terminator) { if (c == commentchar) { // Start of a new comment row ctx = make_char_context(ROW_CTX_COMMENT, ROW_CTX_QUOTE, ROW_CTX_COMMENT, 1, 0, 1); } else if (c == quotechar) { // Quoted string on newrow, or quoted string ending in terminator ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE, ROW_CTX_QUOTE, 1, 0, 1); } else { // Start of a new row unless within a quote ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1); } } else if (c == quotechar) { if (c_prev == delimiter || c_prev == quotechar) { // Quoted string after delimiter, quoted string ending in delimiter, or double-quote ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE); } else { // Closing or ignored quote ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_NONE); } } else { // Neutral character ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE); } } else { const char* data_end = start + data_size - start_offset; if (cur <= end && cur == data_end) { // Add a newline at data end (need the extra row offset to infer length of previous row) ctx = make_char_context(ROW_CTX_EOF, ROW_CTX_EOF, ROW_CTX_EOF, 1, 1, 1); } else { // Pass-through context (beyond chunk_size or data_end) ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_COMMENT); } } // Merge with current context, keeping track of where new rows occur merge_char_context(ctx_map, ctx, pos); } // Eliminate rows that start before byte_range_start if (start_offset + block_pos < byte_range_start) { uint32_t dist_minus1 = min(byte_range_start - (start_offset + block_pos) - 1, UINT64_C(31)); uint32_t mask = 0xfffffffe << dist_minus1; ctx_map.x &= mask; ctx_map.y &= mask; ctx_map.z &= mask; } // Convert the long-form {rowmap,outctx}[inctx] version into packed version // {rowcount,ouctx}[inctx], then merge the row contexts of the 32-character blocks into // a single 16K-character block context rowctx_merge_transform(temp_storage.ctxtree, pack_rowmaps(ctx_map), t); // If this is the second phase, get the block's initial parser state and row counter if (offsets_out.data()) { if (t == 0) { temp_storage.ctxtree[0] = row_ctx[blockIdx.x]; } __syncthreads(); // Walk back the transform tree with the known initial parser state rowctx32_t ctx = rowctx_inverse_merge_transform(temp_storage.ctxtree, t); uint64_t row = (temp_storage.ctxtree[0] >> 2) + (ctx >> 2); uint32_t rows_out_of_range = 0; uint32_t rowmap = select_rowmap(ctx_map, ctx & 3); // Output row positions while (rowmap != 0) { uint32_t pos = __ffs(rowmap); block_pos += pos; if (row >= skip_rows && row - skip_rows < offsets_out.size()) { // Output byte offsets are relative to the base of the input buffer offsets_out[row - skip_rows] = block_pos - 1; rows_out_of_range += (start_offset + block_pos - 1 >= byte_range_end); } row++; rowmap >>= pos; } __syncthreads(); // Return the number of rows out of range rows_out_of_range = block_reduce(temp_storage.bk_storage).Sum(rows_out_of_range); if (t == 0) { row_ctx[blockIdx.x] = rows_out_of_range; } } else { // Just store the row counts and output contexts if (t == 0) { row_ctx[blockIdx.x] = temp_storage.ctxtree[1]; } } } size_t __host__ count_blank_rows(const cudf::io::parse_options_view& opts, device_span<char const> data, device_span<uint64_t const> row_offsets, rmm::cuda_stream_view stream) { const auto newline = 
opts.skipblanklines ? opts.terminator : opts.comment; const auto comment = opts.comment != '\0' ? opts.comment : newline; const auto carriage = (opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment; return thrust::count_if( rmm::exec_policy(stream), row_offsets.begin(), row_offsets.end(), [data = data, newline, comment, carriage] __device__(const uint64_t pos) { return ((pos != data.size()) && (data[pos] == newline || data[pos] == comment || data[pos] == carriage)); }); } device_span<uint64_t> __host__ remove_blank_rows(cudf::io::parse_options_view const& options, device_span<char const> data, device_span<uint64_t> row_offsets, rmm::cuda_stream_view stream) { size_t d_size = data.size(); const auto newline = options.skipblanklines ? options.terminator : options.comment; const auto comment = options.comment != '\0' ? options.comment : newline; const auto carriage = (options.skipblanklines && options.terminator == '\n') ? '\r' : comment; auto new_end = thrust::remove_if( rmm::exec_policy(stream), row_offsets.begin(), row_offsets.end(), [data = data, d_size, newline, comment, carriage] __device__(const uint64_t pos) { return ((pos != d_size) && (data[pos] == newline || data[pos] == comment || data[pos] == carriage)); }); return row_offsets.subspan(0, new_end - row_offsets.begin()); } std::vector<column_type_histogram> detect_column_types( cudf::io::parse_options_view const& options, device_span<char const> const data, device_span<column_parse::flags const> const column_flags, device_span<uint64_t const> const row_starts, size_t const num_active_columns, rmm::cuda_stream_view stream) { // Calculate actual block count to use based on records count const int block_size = csvparse_block_dim; const int grid_size = (row_starts.size() + block_size - 1) / block_size; auto d_stats = detail::make_zeroed_device_uvector_async<column_type_histogram>(num_active_columns, stream); data_type_detection<<<grid_size, block_size, 0, stream.value()>>>( options, data, column_flags, row_starts, d_stats); return detail::make_std_vector_sync(d_stats, stream); } void __host__ decode_row_column_data(cudf::io::parse_options_view const& options, device_span<char const> data, device_span<column_parse::flags const> column_flags, device_span<uint64_t const> row_offsets, device_span<cudf::data_type const> dtypes, device_span<void* const> columns, device_span<cudf::bitmask_type* const> valids, rmm::cuda_stream_view stream) { // Calculate actual block count to use based on records count auto const block_size = csvparse_block_dim; auto const num_rows = row_offsets.size() - 1; auto const grid_size = (num_rows + block_size - 1) / block_size; convert_csv_to_cudf<<<grid_size, block_size, 0, stream.value()>>>( options, data, column_flags, row_offsets, dtypes, columns, valids); } uint32_t __host__ gather_row_offsets(const parse_options_view& options, uint64_t* row_ctx, device_span<uint64_t> const offsets_out, device_span<char const> const data, size_t chunk_size, size_t parse_pos, size_t start_offset, size_t data_size, size_t byte_range_start, size_t byte_range_end, size_t skip_rows, rmm::cuda_stream_view stream) { uint32_t dim_grid = 1 + (chunk_size / rowofs_block_bytes); gather_row_offsets_gpu<<<dim_grid, rowofs_block_dim, 0, stream.value()>>>( row_ctx, offsets_out, data, chunk_size, parse_pos, start_offset, data_size, byte_range_start, byte_range_end, skip_rows, options.terminator, options.delimiter, (options.quotechar) ? options.quotechar : 0x100, /*(options.escapechar) ? 
options.escapechar :*/ 0x100, (options.comment) ? options.comment : 0x100); return dim_grid; } } // namespace gpu } // namespace csv } // namespace io } // namespace cudf
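The type-detection pass in the file above classifies a field as float-like purely from character counts. Below is a rough host-side sketch of that counting idea; it is standalone C++, not part of the cuDF sources, the name looks_like_float and the sample strings are mine, and thousands separators plus the exponent-position check are deliberately omitted.

#include <cstdio>

// Rough reimplementation of the counting heuristic behind is_floatingpoint():
// classify a field from digit/decimal/sign/exponent counts alone.
// '+' and '-' are counted together, mirroring the caller's count_dash + count_plus.
static bool looks_like_float(const char* s)
{
    long len = 0, digits = 0, decimals = 0, signs = 0, exponents = 0;
    for (const char* p = s; *p; ++p, ++len) {
        if (*p >= '0' && *p <= '9')      { ++digits; }
        else if (*p == '.')              { ++decimals; }
        else if (*p == '-' || *p == '+') { ++signs; }
        else if (*p == 'e' || *p == 'E') { ++exponents; }
    }
    if (decimals > 1 || exponents > 1) return false;   // at most one '.' and one exponent
    if (decimals == 0 && exponents == 0) return false; // no '.' and no exponent means integer
    if (signs > 1 + exponents) return false;           // one sign per component
    if (digits + decimals + signs + exponents != len) return false; // no stray characters
    return digits >= 1 + exponents;                    // at least 1 digit, 2 with an exponent
}

int main()
{
    // "e.123-" is the documented false positive: positions are not checked.
    const char* samples[] = {"3.14", "-1e-5", "e.123-", "2020-01-01", "42"};
    for (const char* s : samples) {
        printf("%-12s -> %s\n", s, looks_like_float(s) ? "float-like" : "not float-like");
    }
    return 0;
}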
215ef2da0a0749c6317131365bfbae45aa36ffff.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

// One thread per output element: row iR (y direction), column iC (x direction).
__global__ void MatrixMultKernel(const int m, const int n, const int k,
                                 const float *dev_A, const float *dev_B, float *dev_C)
{
    int iR = blockIdx.y * blockDim.y + threadIdx.y;
    int iC = blockIdx.x * blockDim.x + threadIdx.x;
    if ((iR < m) && (iC < k)) {
        float result = 0;
        for (int i = 0; i < n; ++i) {
            result += dev_A[iR * n + i] * dev_B[iC + i * k];
        }
        dev_C[iR * k + iC] = result;
    }
}

hipError_t MatrixMultWithCuda(int m, int n, int k, float* C, const float *A, const float *B);

/*
m is the number of rows of Matrix A
n is the number of rows in Matrix B and the number of columns in Matrix A
k is the number of columns in Matrix B
Finally the result of the multiplication is in Matrix C, which has m rows and k columns.
Summary:
A m x n
B n x k
C m x k
*/
void MatrixMult_Sequential(int m, int n, int k, float* A, float* B, float* C)
{
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < k; ++col) {
            float sum = 0;
            for (int i = 0; i < n; ++i) {
                float a = A[row * n + i];
                float b = B[col + i * k];
                sum += a * b;
            }
            C[row * k + col] = sum;
        }
    }
}

// Allocates an x*y matrix and fills it with random values in [0, 250).
float* generateArrayWithSize(int x, int y)
{
    float* arr = (float*)malloc(x * y * sizeof(float));
    for (int i = 0; i < x * y; i++) {
        arr[i] = rand() % 250;
    }
    return arr;
}

int main()
{
    int m = 1000, n = 1000, k = 1000;

    float* A = (float*)malloc(m * n * sizeof(float));
    for (int i = 0; i < m * n; i++) {
        A[i] = rand() % 50 + 1;
    }
    float* B = (float*)malloc(n * k * sizeof(float));
    for (int i = 0; i < n * k; i++) {
        B[i] = rand() % 50 + 1;
    }
    float* C = (float*)malloc(m * k * sizeof(float));

    printf("\n\n\nMatrix Multiplication With Cuda\n\n");
    clock_t start_t_Cuda, end_t_Cuda;
    double totaltime_Cuda;
    start_t_Cuda = clock();

    // Multiply the matrices in parallel on the GPU.
    hipError_t cudaStatus = MatrixMultWithCuda(m, n, k, C, A, B);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "MatrixMultWithCuda failed!");
        return 1;
    }

    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    /*
    printf("Matrix A\n");
    for (int i = 0; i < m*n; i++) { printf("%f ", A[i]); if ((i + 1) % n == 0) { printf("\n"); } }
    printf("Matrix B\n");
    for (int i = 0; i < n*k; i++) { printf("%f ", B[i]); if ((i + 1) % k == 0) { printf("\n"); } }
    printf("Matrix C\n");
    for (int i = 0; i < m*k; i++) { printf("%f ", C[i]); if ((i + 1) % k == 0) { printf("\n"); } }
    */

    end_t_Cuda = clock();
    totaltime_Cuda = (double)(end_t_Cuda - start_t_Cuda) / CLOCKS_PER_SEC;
    printf("total time: %f seconds\n", totaltime_Cuda);

    printf("\n\n\nMatrix Multiplication using Sequential C Code\n\n\n");
    clock_t start_t_seq, end_t_seq;
    double totaltime_seq;
    start_t_seq = clock();

    MatrixMult_Sequential(m, n, k, A, B, C);

    /*
    printf("Matrix A\n");
    for (int i = 0; i < m*n; i++) { printf("%f ", A[i]); if ((i + 1) % n == 0) { printf("\n"); } }
    printf("Matrix B\n");
    for (int i = 0; i < n*k; i++) { printf("%f ", B[i]); if ((i + 1) % k == 0) { printf("\n"); } }
    printf("Matrix C\n");
    for (int i = 0; i < m*k; i++) { printf("%f ", C[i]); if ((i + 1) % k == 0) { printf("\n"); } }
    */

    end_t_seq = clock();
    totaltime_seq = (double)(end_t_seq - start_t_seq) / CLOCKS_PER_SEC;
    printf("total time: %f seconds\n", totaltime_seq);

    return 0;
}

// Helper function for using the GPU to multiply the matrices in parallel.
hipError_t MatrixMultWithCuda(int m, int n, int k, float* C, const float *A, const float *B)
{
    float *dev_A = 0;
    float *dev_B = 0;
    float *dev_C = 0;
    hipError_t cudaStatus;

    // Fixed 16x16 thread blocks; the grid is sized to cover the m x k output
    // (x covers columns, y covers rows, matching the kernel's index mapping).
    // Declared before the first goto so no initialization is jumped over.
    dim3 dimBlock(16, 16, 1);
    dim3 dimGrid((k - 1) / 16 + 1, (m - 1) / 16 + 1, 1);

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for the two input matrices and the output matrix.
    cudaStatus = hipMalloc((void**)&dev_C, m * k * sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_A, m * n * sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_B, n * k * sizeof(float));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    // Copy the input matrices from host memory to GPU buffers.
    cudaStatus = hipMemcpy(dev_A, A, m * n * sizeof(float), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }
    cudaStatus = hipMemcpy(dev_B, B, n * k * sizeof(float), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each output element.
    hipLaunchKernelGGL((MatrixMultKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, m, n, k, dev_A, dev_B, dev_C);

    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "MatrixMultKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }

    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching MatrixMultKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy the output matrix from the GPU buffer back to host memory.
    cudaStatus = hipMemcpy(C, dev_C, m * k * sizeof(float), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

Error:
    hipFree(dev_C);
    hipFree(dev_B);
    hipFree(dev_A);

    return cudaStatus;
}
215ef2da0a0749c6317131365bfbae45aa36ffff.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> __global__ void MatrixMultKernel(const int m, const int n, const int k, const float *dev_A, const float *dev_B,float *dev_C) { int iR = blockIdx.y*blockDim.y + threadIdx.y; int iC = blockIdx.x*blockDim.x + threadIdx.x; if ((iR < m) && (iC < k)) { float result = 0; for (int i = 0; i < n; ++i) { result += dev_A[iR*n + i] * dev_B[iC + i*k]; } dev_C[iR*k + iC] = result; } } cudaError_t MatrixMultWithCuda(int m, int n, int k, float* C, const float *A, const float *B); /* m is the number of rows of Matrix A n is the number of rows in Matrix B and the number of Columns in Matrix A k is the number of rows in Matrix B Finally the result of the multiplcation is in Matrix C which has number rows = m and number of columns = k Summary: A mxn B nxk C mxk */ void MatrixMult_Sequential(int m, int n, int k, float* A, float* B, float* C) { for (int row = 0; row < m; ++row) { for (int col = 0; col < k; ++col) { float sum = 0; for (int i = 0; i < n; ++i) { float a = A[row*n + i]; float b = B[col + i*k]; sum += a*b; } C[row*k + col] = sum; } } } float* generateArrayWithSize(int x,int y) { float* arr; for (int i = 0; i < x*y; i++) { arr[i] = rand() % 250; } return arr; } int main() { int m = 1000, n =1000, k = 1000 ; float*A= (float*)malloc(m*n * sizeof(float)); for (int i = 0; i <m&n; i++) { A[i] = rand() % 50+1; } float*B = (float*)malloc(n*k * sizeof(float)); for (int i = 0; i <m&n; i++) { B[i] = rand() % 50 +1; } float*C = (float*)malloc(m*k*sizeof(float)); printf("\n\n\nMatrix Multiplication With Cuda\n\n"); clock_t start_t_Cuda, end_t_Cuda; double totaltime_Cuda; start_t_Cuda = clock(); // Add vectors in parallel. cudaError_t cudaStatus = MatrixMultWithCuda(m, n, k, C,A, B); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } /* printf("Matrix A\n"); for (int i = 0; i < m*n; i++) { printf("%f ", A[i]); if ((i+1)%n == 0) { printf("\n"); } } printf("Matrix B\n"); for (int i = 0; i < n*k; i++) { printf("%f ", B[i]); if ((i+1)%k == 0) { printf("\n"); } } printf("Matrix C\n"); for (int i = 0; i < m*k; i++) { printf("%f ", C[i]); if ((i + 1) %k == 0) { printf("\n"); } } */ end_t_Cuda = clock(); totaltime_Cuda = (double)(end_t_Cuda - start_t_Cuda) / CLOCKS_PER_SEC; printf("total time: %f seconds\n", totaltime_Cuda); printf("\n\n\nMatrix Multiplication using Sequential C Code\n\n\n"); clock_t start_t_seq, end_t_seq; double totaltime_seq; start_t_seq = clock(); MatrixMult_Sequential(m, n, k, A, B, C); /* printf("Matrix A\n"); for (int i = 0; i < m*n; i++) { printf("%f ", A[i]); if ((i + 1) %n == 0) { printf("\n"); } } printf("Matrix B\n"); for (int i = 0; i < n*k; i++) { printf("%f ", B[i]); if ((i + 1) %k == 0) { printf("\n"); } } printf("Matrix C\n"); for (int i = 0; i < m*k; i++) { printf("%f ", C[i]); if ((i + 1) %k == 0) { printf("\n"); } } */ end_t_seq = clock(); totaltime_seq = (double)(end_t_seq - start_t_seq) / CLOCKS_PER_SEC; printf("total time: %f seconds\n", totaltime_seq); return 0; } // Helper function for using CUDA to add vectors in parallel. 
cudaError_t MatrixMultWithCuda(int m,int n,int k,float* C, const float *A, const float *B) { float *dev_A = 0; float *dev_B = 0; float *dev_C = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_C, m*k* sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_A, m*n * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_B, n*k * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_A, A, m*n * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_B, B, n*k * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } dim3 dimGrid(64, 64, 1); dim3 dimBlock((m-1)/64+1, (k-1)/64+1, 1); // Launch a kernel on the GPU with one thread for each element. MatrixMultKernel <<<dimGrid, dimBlock>>>(m,n,k, dev_A, dev_B,dev_C); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(C, dev_C, m*k * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_C); cudaFree(dev_B); cudaFree(dev_A); return cudaStatus; }
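The benchmark above times both the GPU and the sequential paths but never compares their outputs, and both runs write into the same C buffer. Below is a small verification helper one could call from main, assuming the CUDA result is first copied into a separate buffer such as C_gpu; the helper name and the tolerance are illustrative, not part of the original program.

#include <cmath>
#include <cstdio>

// Compare a GPU-produced m x k matrix against a CPU reference, element by element.
// Returns true when every entry agrees within a relative tolerance.
static bool verify_result(const float* C_gpu, const float* C_ref, int m, int k, float rel_tol = 1e-4f)
{
    for (int i = 0; i < m * k; ++i) {
        float denom = fabsf(C_ref[i]) > 1.0f ? fabsf(C_ref[i]) : 1.0f;
        if (fabsf(C_gpu[i] - C_ref[i]) / denom > rel_tol) {
            printf("mismatch at index %d: gpu=%f cpu=%f\n", i, C_gpu[i], C_ref[i]);
            return false;
        }
    }
    return true;
}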
a50b14647c335d0412ae77b910c4009fe6e17990.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel4_plus_2_a [3][2]; static int dims_update_halo_kernel4_plus_2_a_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel4_plus_2_a_gpu(ACC<double> &vol_flux_y, ACC<double> &mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y(0,0,0) = vol_flux_y(2,0,0); if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y(0,0,0) = mass_flux_y(2,0,0); } __global__ void ops_update_halo_kernel4_plus_2_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_a[0][0] * dims_update_halo_kernel4_plus_2_a[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_a[1][0] * dims_update_halo_kernel4_plus_2_a[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel4_plus_2_a[0][0], dims_update_halo_kernel4_plus_2_a[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel4_plus_2_a[1][0], dims_update_halo_kernel4_plus_2_a[1][1], arg1); update_halo_kernel4_plus_2_a_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_a_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,77)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(77,"update_halo_kernel4_plus_2_a"); OPS_kernels[77].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel4_plus_2_a_h[0][0] || ydim0 != dims_update_halo_kernel4_plus_2_a_h[0][1] || xdim1 != dims_update_halo_kernel4_plus_2_a_h[1][0] || ydim1 != dims_update_halo_kernel4_plus_2_a_h[1][1]) { dims_update_halo_kernel4_plus_2_a_h[0][0] = xdim0; dims_update_halo_kernel4_plus_2_a_h[0][1] = ydim0; dims_update_halo_kernel4_plus_2_a_h[1][0] = xdim1; dims_update_halo_kernel4_plus_2_a_h[1][1] = ydim1; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel4_plus_2_a, dims_update_halo_kernel4_plus_2_a_h, sizeof(dims_update_halo_kernel4_plus_2_a))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 
grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[77].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[77].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[77].mpi_time += t2-t1; OPS_kernels[77].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[77].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 77; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 77; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_a_execute; if (OPS_diags > 1) { ops_timing_realloc(77,"update_halo_kernel4_plus_2_a"); } ops_enqueue_kernel(desc); } #endif
a50b14647c335d0412ae77b910c4009fe6e17990.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel4_plus_2_a [3][2]; static int dims_update_halo_kernel4_plus_2_a_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel4_plus_2_a_gpu(ACC<double> &vol_flux_y, ACC<double> &mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y(0,0,0) = vol_flux_y(2,0,0); if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y(0,0,0) = mass_flux_y(2,0,0); } __global__ void ops_update_halo_kernel4_plus_2_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_a[0][0] * dims_update_halo_kernel4_plus_2_a[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_plus_2_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel4_plus_2_a[1][0] * dims_update_halo_kernel4_plus_2_a[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel4_plus_2_a[0][0], dims_update_halo_kernel4_plus_2_a[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel4_plus_2_a[1][0], dims_update_halo_kernel4_plus_2_a[1][1], arg1); update_halo_kernel4_plus_2_a_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_a_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,77)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(77,"update_halo_kernel4_plus_2_a"); OPS_kernels[77].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel4_plus_2_a_h[0][0] || ydim0 != dims_update_halo_kernel4_plus_2_a_h[0][1] || xdim1 != dims_update_halo_kernel4_plus_2_a_h[1][0] || ydim1 != dims_update_halo_kernel4_plus_2_a_h[1][1]) { dims_update_halo_kernel4_plus_2_a_h[0][0] = xdim0; dims_update_halo_kernel4_plus_2_a_h[0][1] = ydim0; dims_update_halo_kernel4_plus_2_a_h[1][0] = xdim1; dims_update_halo_kernel4_plus_2_a_h[1][1] = ydim1; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel4_plus_2_a, dims_update_halo_kernel4_plus_2_a_h, sizeof(dims_update_halo_kernel4_plus_2_a))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 
(z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[77].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel4_plus_2_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[77].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[77].mpi_time += t2-t1; OPS_kernels[77].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[77].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 77; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 77; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_a_execute; if (OPS_diags > 1) { ops_timing_realloc(77,"update_halo_kernel4_plus_2_a"); } ops_enqueue_kernel(desc); } #endif
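Stripped of the OPS loop bookkeeping, the user kernel in this pair overwrites each halo point with the value two cells further along the first (fastest-varying) index, for whichever fields are flagged. Below is a standalone CUDA sketch of that pattern for a one-cell-thick halo plane of a single contiguous 3-D field; the layout, names, and launch shape are illustrative and not taken from the OPS sources.

#include <cuda_runtime.h>

// Copy the interior plane at x == 2 into the halo plane at x == 0 for a field
// stored as field[z][y][x], with x the fastest-varying index.
__global__ void halo_plus_2_x(double* field, int nx, int ny, int nz)
{
    int y = blockIdx.x * blockDim.x + threadIdx.x;
    int z = blockIdx.y * blockDim.y + threadIdx.y;
    if (y < ny && z < nz) {
        size_t base = (size_t)z * ny * nx + (size_t)y * nx;
        field[base + 0] = field[base + 2];
    }
}

// Launch over the y-z face of the field: one thread per (y, z) pair.
static void run_halo_plus_2_x(double* d_field, int nx, int ny, int nz)
{
    dim3 block(32, 8);
    dim3 grid((ny + block.x - 1) / block.x, (nz + block.y - 1) / block.y);
    halo_plus_2_x<<<grid, block>>>(d_field, nx, ny, nz);
}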
0493bdc8e1bbddca113be4675a5b9e5c8faf4191.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #define BLOCK_SIZE 16 /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: gpu_square_matrix_mult description: dot product of two matrix (not only square) in GPU parameters: &a GPU device pointer to a n X n matrix (A) &b GPU device pointer to a n X n matrix (B) &c GPU device output purpose pointer to a n X n matrix (C) to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n) { __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; int tmp = 0; int idx; for (int sub = 0; sub < gridDim.x; ++sub) { idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) { // n may not divisible by BLOCK_SIZE tile_a[threadIdx.y][threadIdx.x] = 0; } else { tile_a[threadIdx.y][threadIdx.x] = d_a[idx]; } idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) { tile_b[threadIdx.y][threadIdx.x] = 0; } else { tile_b[threadIdx.y][threadIdx.x] = d_b[idx]; } __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; } __syncthreads(); } if(row < n && col < n) { d_result[row * n + col] = tmp; } } /* ********************************************************************* function name: gpu_matrix_transpose description: matrix transpose parameters: &mat_in GPU device pointer to a rows X cols matrix &mat_out GPU device output purpose pointer to a cols X rows matrix to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols) { unsigned int idx = blockIdx.x * blockDim.x + 
threadIdx.x; unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx < cols && idy < rows) { unsigned int pos = idy * cols + idx; unsigned int trans_pos = idx * rows + idy; mat_out[trans_pos] = mat_in[pos]; } } /* ********************************************************************* function name: cpu_matrix_mult description: dot product of two matrix (not only square) in CPU, for validating GPU results parameters: &a CPU host pointer to a m X n matrix (A) &b CPU host pointer to a n X k matrix (B) &c CPU host output purpose pointer to a m X k matrix (C) to store the result return: none ********************************************************************* */ void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) { for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { int tmp = 0.0; for (int h = 0; h < n; ++h) { tmp += h_a[i * n + h] * h_b[h * k + j]; } h_result[i * k + j] = tmp; } } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int h_aa[90000]; int m=256, n=256, k=256; for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_aa[i * n + j] = rand() % 1024; } } printf("With Shared Memory\n"); for(int ii=0; ii<2; ii++) { /* Fixed seed for illustration */ srand(3333); //printf("please type in m n and k\n"); //scanf("%d %d %d", &m, &n, &k); // allocate memory in host RAM, h_cc is used to store CPU result int *h_a, *h_b, *h_c, *h_cc; hipHostMalloc((void **) &h_a, sizeof(int)*m*n); hipHostMalloc((void **) &h_b, sizeof(int)*n*k); hipHostMalloc((void **) &h_c, sizeof(int)*m*k); hipHostMalloc((void **) &h_cc, sizeof(int)*m*k); // random initialize matrix A for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_a[i * n + j] = h_aa[i * n + j]; } } // random initialize matrix B for (int i = 0; i < n; ++i) { for (int j = 0; j < k; ++j) { h_b[i * k + j] = h_aa[i * k + j]; } } float gpu_elapsed_time_ms, cpu_elapsed_time_ms; // some events to count the execution time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // start to count execution time of GPU version hipEventRecord(start, 0); // Allocate memory space on the device int *d_a, *d_b, *d_c; hipMalloc((void **) &d_a, sizeof(int)*m*n); hipMalloc((void **) &d_b, sizeof(int)*n*k); hipMalloc((void **) &d_c, sizeof(int)*m*k); // copy matrix A and B from host to device memory hipMemcpy(d_a, h_a, sizeof(int)*m*n, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, sizeof(int)*n*k, hipMemcpyHostToDevice); unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel if(ii==0) { printf("shared memory\n"); hipLaunchKernelGGL(( gpu_square_matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, n); } else { printf("no shared memory\n"); hipLaunchKernelGGL(( gpu_matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, m, n, k); } // Transefr results from device to host hipMemcpy(h_c, d_c, sizeof(int)*m*k, hipMemcpyDeviceToHost); hipDeviceSynchronize(); // time counting terminate hipEventRecord(stop, 0); hipEventSynchronize(stop); // compute time elapse on GPU computing hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . 
%dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms); // start the CPU version hipEventRecord(start, 0); cpu_matrix_mult(h_a, h_b, h_cc, m, n, k); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&cpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms); // validate results computed by GPU int all_ok = 1; for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { //printf("[%d][%d]:%d == [%d][%d]:%d, ", i, j, h_cc[i*k + j], i, j, h_c[i*k + j]); if(h_cc[i*k + j] != h_c[i*k + j]) { all_ok = 0; } } //printf("\n"); } // roughly compute speedup if(all_ok) { printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms); } else { printf("incorrect results\n"); } // free memory hipFree(d_a); hipFree(d_b); hipFree(d_c); hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c); hipHostFree(h_cc); } return 0; }
0493bdc8e1bbddca113be4675a5b9e5c8faf4191.cu
/* * file name: matrix.cu * * matrix.cu contains the code that realize some common used matrix operations in CUDA * * this is a toy program for learning CUDA, some functions are reusable in other project * */ #include <stdio.h> #include <stdlib.h> #include <assert.h> #define BLOCK_SIZE 16 /* ********************************************************************* function name: gpu_matrix_mult description: dot product of two matrix (not only square) parameters: &a GPU device pointer to a m X n matrix (A) &b GPU device pointer to a n X k matrix (B) &c GPU device output purpose pointer to a m X k matrix (C) to store the result Note: grid and block should be configured as: dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); further sppedup can be obtained by using shared memory to decrease global memory access times return: none ********************************************************************* */ __global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < k && row < m) { for(int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } /* ********************************************************************* function name: gpu_square_matrix_mult description: dot product of two matrix (not only square) in GPU parameters: &a GPU device pointer to a n X n matrix (A) &b GPU device pointer to a n X n matrix (B) &c GPU device output purpose pointer to a n X n matrix (C) to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n) { __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; int tmp = 0; int idx; for (int sub = 0; sub < gridDim.x; ++sub) { idx = row * n + sub * BLOCK_SIZE + threadIdx.x; if(idx >= n*n) { // n may not divisible by BLOCK_SIZE tile_a[threadIdx.y][threadIdx.x] = 0; } else { tile_a[threadIdx.y][threadIdx.x] = d_a[idx]; } idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col; if(idx >= n*n) { tile_b[threadIdx.y][threadIdx.x] = 0; } else { tile_b[threadIdx.y][threadIdx.x] = d_b[idx]; } __syncthreads(); for (int k = 0; k < BLOCK_SIZE; ++k) { tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x]; } __syncthreads(); } if(row < n && col < n) { d_result[row * n + col] = tmp; } } /* ********************************************************************* function name: gpu_matrix_transpose description: matrix transpose parameters: &mat_in GPU device pointer to a rows X cols matrix &mat_out GPU device output purpose pointer to a cols X rows matrix to store the result Note: grid and block should be configured as: dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1); dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1); return: none ********************************************************************* */ __global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; if (idx < cols && 
idy < rows) { unsigned int pos = idy * cols + idx; unsigned int trans_pos = idx * rows + idy; mat_out[trans_pos] = mat_in[pos]; } } /* ********************************************************************* function name: cpu_matrix_mult description: dot product of two matrix (not only square) in CPU, for validating GPU results parameters: &a CPU host pointer to a m X n matrix (A) &b CPU host pointer to a n X k matrix (B) &c CPU host output purpose pointer to a m X k matrix (C) to store the result return: none ********************************************************************* */ void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) { for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { int tmp = 0.0; for (int h = 0; h < n; ++h) { tmp += h_a[i * n + h] * h_b[h * k + j]; } h_result[i * k + j] = tmp; } } } /* ********************************************************************* function name: main description: test and compare parameters: none return: none ********************************************************************* */ int main(int argc, char const *argv[]) { int h_aa[90000]; int m=256, n=256, k=256; for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_aa[i * n + j] = rand() % 1024; } } printf("With Shared Memory\n"); for(int ii=0; ii<2; ii++) { /* Fixed seed for illustration */ srand(3333); //printf("please type in m n and k\n"); //scanf("%d %d %d", &m, &n, &k); // allocate memory in host RAM, h_cc is used to store CPU result int *h_a, *h_b, *h_c, *h_cc; cudaMallocHost((void **) &h_a, sizeof(int)*m*n); cudaMallocHost((void **) &h_b, sizeof(int)*n*k); cudaMallocHost((void **) &h_c, sizeof(int)*m*k); cudaMallocHost((void **) &h_cc, sizeof(int)*m*k); // random initialize matrix A for (int i = 0; i < m; ++i) { for (int j = 0; j < n; ++j) { h_a[i * n + j] = h_aa[i * n + j]; } } // random initialize matrix B for (int i = 0; i < n; ++i) { for (int j = 0; j < k; ++j) { h_b[i * k + j] = h_aa[i * k + j]; } } float gpu_elapsed_time_ms, cpu_elapsed_time_ms; // some events to count the execution time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // start to count execution time of GPU version cudaEventRecord(start, 0); // Allocate memory space on the device int *d_a, *d_b, *d_c; cudaMalloc((void **) &d_a, sizeof(int)*m*n); cudaMalloc((void **) &d_b, sizeof(int)*n*k); cudaMalloc((void **) &d_c, sizeof(int)*m*k); // copy matrix A and B from host to device memory cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice); unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // Launch kernel if(ii==0) { printf("shared memory\n"); gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n); } else { printf("no shared memory\n"); gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k); } // Transefr results from device to host cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost); cudaThreadSynchronize(); // time counting terminate cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // compute time elapse on GPU computing cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . 
%dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms); // start the CPU version cudaEventRecord(start, 0); cpu_matrix_mult(h_a, h_b, h_cc, m, n, k); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop); printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms); // validate results computed by GPU int all_ok = 1; for (int i = 0; i < m; ++i) { for (int j = 0; j < k; ++j) { //printf("[%d][%d]:%d == [%d][%d]:%d, ", i, j, h_cc[i*k + j], i, j, h_c[i*k + j]); if(h_cc[i*k + j] != h_c[i*k + j]) { all_ok = 0; } } //printf("\n"); } // roughly compute speedup if(all_ok) { printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms); } else { printf("incorrect results\n"); } // free memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); cudaFreeHost(h_cc); } return 0; }
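// --- Editor's note, not a dataset record -------------------------------------------
// The matrix-multiply pair above is a convenient reference for the host-memory API
// mapping: cudaMallocHost/cudaFreeHost become hipHostMalloc/hipHostFree,
// cudaMalloc/cudaFree become hipMalloc/hipFree, and the deprecated
// cudaThreadSynchronize() is rewritten as hipDeviceSynchronize(). A minimal CUDA round
// trip over the same calls; pinned_roundtrip_sketch and its buffers are hypothetical names.
#include <cuda_runtime.h>
int pinned_roundtrip_sketch(int n) {
  int *h_buf = nullptr, *d_buf = nullptr;
  cudaMallocHost((void **)&h_buf, n * sizeof(int));   // hipify: hipHostMalloc
  cudaMalloc((void **)&d_buf, n * sizeof(int));       // hipify: hipMalloc
  for (int i = 0; i < n; ++i) h_buf[i] = i;           // fill pinned host buffer
  cudaMemcpy(d_buf, h_buf, n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(h_buf, d_buf, n * sizeof(int), cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();                            // modern replacement for cudaThreadSynchronize()
  cudaFree(d_buf);                                    // hipify: hipFree
  cudaFreeHost(h_buf);                                // hipify: hipHostFree
  return 0;
}
// ------------------------------------------------------------------------------------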
4b922ddfdf5eb6855db36a3f2baef0b24db392eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void mykernel(void) { printf("Hello World\n"); } __global__ void vecadd_kernel(int* a, int* b, int* c, int N) { //int i = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { c[i] = a[i] + b[i]; } } void vecadd(int* a, int* b, int* c, int N) { //vecadd_kernel << < 1, N >> > (a, b, c, N); vecadd_kernel << < (N+1023)/1024, 1024 >> > (a, b, c, N); hipDeviceSynchronize(); //for (int i = 0; i < N; i++) { //vecadd_kernel(a, b, c, N, i); //} } int main(void) { int N = 512; int* a, * b, * c; hipMallocManaged(&a, N * sizeof(int)); hipMallocManaged(&b, N * sizeof(int)); hipMallocManaged(&c, N * sizeof(int)); //a = (int*)malloc(N * sizeof(int)); //b = (int*)malloc(N * sizeof(int)); //c = (int*)malloc(N * sizeof(int)); vecadd(a, b, c, N); //mykernel <<<1, 1 >>> (); //hipDeviceSynchronize(); hipFree(a); hipFree(b); hipFree(c); return 0; }
4b922ddfdf5eb6855db36a3f2baef0b24db392eb.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void mykernel(void) { printf("Hellow World\n"); } __global__ void vecadd_kernel(int* a, int* b, int* c, int N) { //int i = threadIdx.x; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { c[i] = a[i] + b[i]; } } void vecadd(int* a, int* b, int* c, int N) { //vecadd_kernel << < 1, N >> > (a, b, c, N); vecadd_kernel << < (N+1023)/1024/1024, 1024 >> > (a, b, c, N); cudaDeviceSynchronize(); //for (int i = 0; i < N; i++) { //vecadd_kernel(a, b, c, N, i); //} } int main(void) { int N = 512; int* a, * b, * c; cudaMallocManaged(&a, N * sizeof(int)); cudaMallocManaged(&b, N * sizeof(int)); cudaMallocManaged(&c, N * sizeof(int)); //a = (int*)malloc(N * sizeof(int)); //b = (int*)malloc(N * sizeof(int)); //c = (int*)malloc(N * sizeof(int)); vecadd(a, b, c, N); //mykernel <<<1, 1 >>> (); //cudaDeviceSynchronize(); cudaFree(a); cudaFree(b); cudaFree(c); return 0; }
0bea4da48775508296641db9d4e99fe969eed300.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_successor_generator.cuh" #include <algorithm> #include <iostream> #include <memory> #include <queue> #include <string> #include <vector> #include <hiprand/hiprand_kernel.h> #include "cuda_common/cuda_check.cuh" #include "cuda_common/cuda_random.cuh" #include "cuda_sas_plus.cuh" #include "sas_plus.h" #include "successor_generator.h" using namespace pplanner; std::queue<std::string> ExampleSASPlusLines(); __global__ void CountKernel(const CudaSuccessorGenerator generator, const CudaSASPlus problem, const int *state, int *result) { *result = Count(generator, problem, state); } __global__ void GenerateKernel(const CudaSuccessorGenerator generator, const CudaSASPlus problem, const int *state, int *result) { Generate(generator, problem, state, result); } void GenerateTest(const CudaSuccessorGenerator &generator, const CudaSASPlus &problem) { int *cuda_count = nullptr; CUDA_CHECK(hipMalloc((void**)&cuda_count, sizeof(int))); std::vector<int> state{0, 1, 0}; int *cuda_state = nullptr; CudaMallocAndCopy((void**)&cuda_state, state.data(), 3 * sizeof(int)); hipLaunchKernelGGL(( CountKernel), dim3(1), dim3(1), 0, 0, generator, problem, cuda_state, cuda_count); int count = 0; CUDA_CHECK(hipMemcpy(&count, cuda_count, sizeof(int), hipMemcpyDeviceToHost)); assert(2 == count); int *cuda_result = nullptr; CUDA_CHECK(hipMalloc((void**)&cuda_result, count * sizeof(int))); hipLaunchKernelGGL(( GenerateKernel), dim3(1), dim3(1), 0, 0, generator, problem, cuda_state, cuda_result); std::vector<int> result(count); CUDA_CHECK(hipMemcpy(result.data(), cuda_result, count * sizeof(int), hipMemcpyDeviceToHost)); std::sort(result.begin(), result.end()); assert(2 == result[0]); assert(4 == result[1]); state[1] = 0; state[2] = 2; CUDA_CHECK(hipMemcpy(cuda_state, state.data(), 3 * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( CountKernel), dim3(1), dim3(1), 0, 0, generator, problem, cuda_state, cuda_count); CUDA_CHECK(hipMemcpy(&count, cuda_count, sizeof(int), hipMemcpyDeviceToHost)); assert(2 == count); hipLaunchKernelGGL(( GenerateKernel), dim3(1), dim3(1), 0, 0, generator, problem, cuda_state, cuda_result); CUDA_CHECK(hipMemcpy(result.data(), cuda_result, count * sizeof(int), hipMemcpyDeviceToHost)); std::sort(result.begin(), result.end()); assert(0 == result[0]); assert(2 == result[1]); state[0] = 1; CUDA_CHECK(hipMemcpy(cuda_state, state.data(), 3 * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( CountKernel), dim3(1), dim3(1), 0, 0, generator, problem, cuda_state, cuda_count); CUDA_CHECK(hipMemcpy(&count, cuda_count, sizeof(int), hipMemcpyDeviceToHost)); assert(2 == count); hipLaunchKernelGGL(( GenerateKernel), dim3(1), dim3(1), 0, 0, generator, problem, cuda_state, cuda_result); CUDA_CHECK(hipMemcpy(result.data(), cuda_result, count * sizeof(int), hipMemcpyDeviceToHost)); std::sort(result.begin(), result.end()); assert(1 == result[0]); assert(3 == result[1]); CUDA_CHECK(hipFree(cuda_count)); CUDA_CHECK(hipFree(cuda_result)); CUDA_CHECK(hipFree(cuda_state)); std::cout << "passed GenerateTest" << std::endl; } __global__ void SampleKernel(const CudaSuccessorGenerator generator, const CudaSASPlus problem, const int *state, int *result, hiprandState_t *rng) { *result = Sample(generator, problem, state, rng); } void SampleTest(const CudaSuccessorGenerator &generator, const CudaSASPlus &problem) { int *cuda_count = nullptr; CUDA_CHECK(hipMalloc((void**)&cuda_count, sizeof(int))); std::vector<int> 
state{0, 1, 0}; int *cuda_state = nullptr; CudaMallocAndCopy((void**)&cuda_state, state.data(), 3 * sizeof(int)); hiprandState_t *rng = nullptr; CUDA_CHECK(hipMalloc((void**)&rng, sizeof(hiprandState_t))); hipLaunchKernelGGL(( SetupStates), dim3(1), dim3(1), 0, 0, 123, rng); int *cuda_result = nullptr; CUDA_CHECK(hipMalloc((void**)&cuda_result, sizeof(int))); hipLaunchKernelGGL(( SampleKernel), dim3(1), dim3(1), 0, 0, generator, problem, cuda_state, cuda_result, rng); int result; CUDA_CHECK(hipMemcpy(&result, cuda_result, sizeof(int), hipMemcpyDeviceToHost)); assert(2 == result || 4 == result); state[1] = 0; state[2] = 2; CUDA_CHECK(hipMemcpy(cuda_state, state.data(), 3 * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( SampleKernel), dim3(1), dim3(1), 0, 0, generator, problem, cuda_state, cuda_result, rng); CUDA_CHECK(hipMemcpy(&result, cuda_result, sizeof(int), hipMemcpyDeviceToHost)); assert(0 == result || 2 == result); state[0] = 1; CUDA_CHECK(hipMemcpy(cuda_state, state.data(), 3 * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( SampleKernel), dim3(1), dim3(1), 0, 0, generator, problem, cuda_state, cuda_result, rng); CUDA_CHECK(hipMemcpy(&result, cuda_result, sizeof(int), hipMemcpyDeviceToHost)); assert(1 == result || 3 == result); CUDA_CHECK(hipFree(cuda_count)); CUDA_CHECK(hipFree(rng)); CUDA_CHECK(hipFree(cuda_result)); CUDA_CHECK(hipFree(cuda_state)); std::cout << "passed SampleTest" << std::endl; } int main() { auto lines = ExampleSASPlusLines(); auto problem = std::make_shared<SASPlus>(); problem->InitFromLines(lines); auto generator = std::make_shared<SuccessorGenerator>(problem); CudaSASPlus cuda_problem; InitCudaSASPlus(problem, &cuda_problem); CudaSuccessorGenerator cuda_generator; InitCudaSuccessorGenerator(generator, &cuda_generator); GenerateTest(cuda_generator, cuda_problem); SampleTest(cuda_generator, cuda_problem); FreeCudaSuccessorGenerator(&cuda_generator); FreeCudaSASPlus(&cuda_problem); } std::queue<std::string> ExampleSASPlusLines() { std::queue<std::string> q; q.push("begin_version"); q.push("3"); q.push("end_version"); q.push("begin_metric"); q.push("0"); q.push("end_metric"); q.push("3"); q.push("begin_variable"); q.push("var0"); q.push("-1"); q.push("2"); q.push("Atom at-robby(rooma)"); q.push("Atom at-robby(roomb)"); q.push("end_variable"); q.push("begin_variable"); q.push("var1"); q.push("-1"); q.push("2"); q.push("Atom carry(ball1, left)"); q.push("Atom free(left)"); q.push("end_variable"); q.push("begin_variable"); q.push("var2"); q.push("-1"); q.push("3"); q.push("Atom at(ball1, rooma)"); q.push("Atom at(ball1, roomb)"); q.push("<none of those>"); q.push("end_variable"); q.push("1"); q.push("begin_mutex_group"); q.push("3"); q.push("2 0"); q.push("2 1"); q.push("1 0"); q.push("end_mutex_group"); q.push("begin_state"); q.push("0"); q.push("1"); q.push("0"); q.push("end_state"); q.push("begin_goal"); q.push("1"); q.push("2 1"); q.push("end_goal"); q.push("6"); q.push("begin_operator"); q.push("drop ball1 rooma left"); q.push("1"); q.push("0 0"); q.push("2"); q.push("0 2 -1 0"); q.push("0 1 0 1"); q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("drop ball1 roomb left"); q.push("1"); q.push("0 1"); q.push("2"); q.push("0 2 -1 1"); q.push("0 1 0 1"); q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("move rooma roomb"); q.push("0"); q.push("1"); q.push("0 0 0 1"); q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("move roomb rooma"); q.push("0"); q.push("1"); q.push("0 0 1 0"); 
q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("pick ball1 rooma left"); q.push("1"); q.push("0 0"); q.push("2"); q.push("0 2 0 2"); q.push("0 1 1 0"); q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("pick ball1 roomb left"); q.push("1"); q.push("0 1"); q.push("2"); q.push("0 2 1 2"); q.push("0 1 1 0"); q.push("1"); q.push("end_operator"); q.push("0"); return q; }
0bea4da48775508296641db9d4e99fe969eed300.cu
#include "cuda_successor_generator.cuh" #include <algorithm> #include <iostream> #include <memory> #include <queue> #include <string> #include <vector> #include <curand_kernel.h> #include "cuda_common/cuda_check.cuh" #include "cuda_common/cuda_random.cuh" #include "cuda_sas_plus.cuh" #include "sas_plus.h" #include "successor_generator.h" using namespace pplanner; std::queue<std::string> ExampleSASPlusLines(); __global__ void CountKernel(const CudaSuccessorGenerator generator, const CudaSASPlus problem, const int *state, int *result) { *result = Count(generator, problem, state); } __global__ void GenerateKernel(const CudaSuccessorGenerator generator, const CudaSASPlus problem, const int *state, int *result) { Generate(generator, problem, state, result); } void GenerateTest(const CudaSuccessorGenerator &generator, const CudaSASPlus &problem) { int *cuda_count = nullptr; CUDA_CHECK(cudaMalloc((void**)&cuda_count, sizeof(int))); std::vector<int> state{0, 1, 0}; int *cuda_state = nullptr; CudaMallocAndCopy((void**)&cuda_state, state.data(), 3 * sizeof(int)); CountKernel<<<1, 1>>>(generator, problem, cuda_state, cuda_count); int count = 0; CUDA_CHECK(cudaMemcpy(&count, cuda_count, sizeof(int), cudaMemcpyDeviceToHost)); assert(2 == count); int *cuda_result = nullptr; CUDA_CHECK(cudaMalloc((void**)&cuda_result, count * sizeof(int))); GenerateKernel<<<1, 1>>>(generator, problem, cuda_state, cuda_result); std::vector<int> result(count); CUDA_CHECK(cudaMemcpy(result.data(), cuda_result, count * sizeof(int), cudaMemcpyDeviceToHost)); std::sort(result.begin(), result.end()); assert(2 == result[0]); assert(4 == result[1]); state[1] = 0; state[2] = 2; CUDA_CHECK(cudaMemcpy(cuda_state, state.data(), 3 * sizeof(int), cudaMemcpyHostToDevice)); CountKernel<<<1, 1>>>(generator, problem, cuda_state, cuda_count); CUDA_CHECK(cudaMemcpy(&count, cuda_count, sizeof(int), cudaMemcpyDeviceToHost)); assert(2 == count); GenerateKernel<<<1, 1>>>(generator, problem, cuda_state, cuda_result); CUDA_CHECK(cudaMemcpy(result.data(), cuda_result, count * sizeof(int), cudaMemcpyDeviceToHost)); std::sort(result.begin(), result.end()); assert(0 == result[0]); assert(2 == result[1]); state[0] = 1; CUDA_CHECK(cudaMemcpy(cuda_state, state.data(), 3 * sizeof(int), cudaMemcpyHostToDevice)); CountKernel<<<1, 1>>>(generator, problem, cuda_state, cuda_count); CUDA_CHECK(cudaMemcpy(&count, cuda_count, sizeof(int), cudaMemcpyDeviceToHost)); assert(2 == count); GenerateKernel<<<1, 1>>>(generator, problem, cuda_state, cuda_result); CUDA_CHECK(cudaMemcpy(result.data(), cuda_result, count * sizeof(int), cudaMemcpyDeviceToHost)); std::sort(result.begin(), result.end()); assert(1 == result[0]); assert(3 == result[1]); CUDA_CHECK(cudaFree(cuda_count)); CUDA_CHECK(cudaFree(cuda_result)); CUDA_CHECK(cudaFree(cuda_state)); std::cout << "passed GenerateTest" << std::endl; } __global__ void SampleKernel(const CudaSuccessorGenerator generator, const CudaSASPlus problem, const int *state, int *result, curandState_t *rng) { *result = Sample(generator, problem, state, rng); } void SampleTest(const CudaSuccessorGenerator &generator, const CudaSASPlus &problem) { int *cuda_count = nullptr; CUDA_CHECK(cudaMalloc((void**)&cuda_count, sizeof(int))); std::vector<int> state{0, 1, 0}; int *cuda_state = nullptr; CudaMallocAndCopy((void**)&cuda_state, state.data(), 3 * sizeof(int)); curandState_t *rng = nullptr; CUDA_CHECK(cudaMalloc((void**)&rng, sizeof(curandState_t))); SetupStates<<<1, 1>>>(123, rng); int *cuda_result = nullptr; 
CUDA_CHECK(cudaMalloc((void**)&cuda_result, sizeof(int))); SampleKernel<<<1, 1>>>(generator, problem, cuda_state, cuda_result, rng); int result; CUDA_CHECK(cudaMemcpy(&result, cuda_result, sizeof(int), cudaMemcpyDeviceToHost)); assert(2 == result || 4 == result); state[1] = 0; state[2] = 2; CUDA_CHECK(cudaMemcpy(cuda_state, state.data(), 3 * sizeof(int), cudaMemcpyHostToDevice)); SampleKernel<<<1, 1>>>(generator, problem, cuda_state, cuda_result, rng); CUDA_CHECK(cudaMemcpy(&result, cuda_result, sizeof(int), cudaMemcpyDeviceToHost)); assert(0 == result || 2 == result); state[0] = 1; CUDA_CHECK(cudaMemcpy(cuda_state, state.data(), 3 * sizeof(int), cudaMemcpyHostToDevice)); SampleKernel<<<1, 1>>>(generator, problem, cuda_state, cuda_result, rng); CUDA_CHECK(cudaMemcpy(&result, cuda_result, sizeof(int), cudaMemcpyDeviceToHost)); assert(1 == result || 3 == result); CUDA_CHECK(cudaFree(cuda_count)); CUDA_CHECK(cudaFree(rng)); CUDA_CHECK(cudaFree(cuda_result)); CUDA_CHECK(cudaFree(cuda_state)); std::cout << "passed SampleTest" << std::endl; } int main() { auto lines = ExampleSASPlusLines(); auto problem = std::make_shared<SASPlus>(); problem->InitFromLines(lines); auto generator = std::make_shared<SuccessorGenerator>(problem); CudaSASPlus cuda_problem; InitCudaSASPlus(problem, &cuda_problem); CudaSuccessorGenerator cuda_generator; InitCudaSuccessorGenerator(generator, &cuda_generator); GenerateTest(cuda_generator, cuda_problem); SampleTest(cuda_generator, cuda_problem); FreeCudaSuccessorGenerator(&cuda_generator); FreeCudaSASPlus(&cuda_problem); } std::queue<std::string> ExampleSASPlusLines() { std::queue<std::string> q; q.push("begin_version"); q.push("3"); q.push("end_version"); q.push("begin_metric"); q.push("0"); q.push("end_metric"); q.push("3"); q.push("begin_variable"); q.push("var0"); q.push("-1"); q.push("2"); q.push("Atom at-robby(rooma)"); q.push("Atom at-robby(roomb)"); q.push("end_variable"); q.push("begin_variable"); q.push("var1"); q.push("-1"); q.push("2"); q.push("Atom carry(ball1, left)"); q.push("Atom free(left)"); q.push("end_variable"); q.push("begin_variable"); q.push("var2"); q.push("-1"); q.push("3"); q.push("Atom at(ball1, rooma)"); q.push("Atom at(ball1, roomb)"); q.push("<none of those>"); q.push("end_variable"); q.push("1"); q.push("begin_mutex_group"); q.push("3"); q.push("2 0"); q.push("2 1"); q.push("1 0"); q.push("end_mutex_group"); q.push("begin_state"); q.push("0"); q.push("1"); q.push("0"); q.push("end_state"); q.push("begin_goal"); q.push("1"); q.push("2 1"); q.push("end_goal"); q.push("6"); q.push("begin_operator"); q.push("drop ball1 rooma left"); q.push("1"); q.push("0 0"); q.push("2"); q.push("0 2 -1 0"); q.push("0 1 0 1"); q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("drop ball1 roomb left"); q.push("1"); q.push("0 1"); q.push("2"); q.push("0 2 -1 1"); q.push("0 1 0 1"); q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("move rooma roomb"); q.push("0"); q.push("1"); q.push("0 0 0 1"); q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("move roomb rooma"); q.push("0"); q.push("1"); q.push("0 0 1 0"); q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("pick ball1 rooma left"); q.push("1"); q.push("0 0"); q.push("2"); q.push("0 2 0 2"); q.push("0 1 1 0"); q.push("1"); q.push("end_operator"); q.push("begin_operator"); q.push("pick ball1 roomb left"); q.push("1"); q.push("0 1"); q.push("2"); q.push("0 2 1 2"); q.push("0 1 1 0"); q.push("1"); q.push("end_operator"); q.push("0"); 
return q; }
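// --- Editor's note, not a dataset record -------------------------------------------
// The successor-generator test pair above also covers the RNG mapping: curand_kernel.h
// becomes hiprand_kernel.h and curandState_t becomes hiprandState_t. The SetupStates
// kernel called above comes from the included cuda_common/cuda_random.cuh and is not
// reproduced here; the hypothetical kernels below only illustrate the standard
// curand_init / curand_uniform pattern such a helper typically follows (an assumption,
// not the project's actual implementation).
#include <curand_kernel.h>
__global__ void setup_states_sketch(unsigned long long seed, curandState_t *states) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  curand_init(seed, id, 0, &states[id]);   // hipify: hiprand_init on a hiprandState_t
}
__global__ void sample_uniform_sketch(curandState_t *states, float *out) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  out[id] = curand_uniform(&states[id]);   // hipify: hiprand_uniform
}
// ------------------------------------------------------------------------------------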
1a4f9347ea96bec8691d110a664f81d2afef037c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void initData(float *u, float *f, int N) { printf("Hello from initData: Thread (%d, %d), Block (%d,%d)\n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y); } __global__ void poissonKernel(float *u, float *u_new, float *f, int N) { printf("Hello from poissonKernel: Thread (%d, %d), Block (%d,%d)\n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y); } __global__ void computeError(float error, float *u, float *u_new, int N) { printf("Hello from computeError: Thread (%d, %d), Block (%d,%d)\n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y); } __global__ void updateSolution(float *u, float *u_new, int N) { printf("Hello from updateSolution: Thread (%d, %d), Block (%d,%d)\n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y); }
1a4f9347ea96bec8691d110a664f81d2afef037c.cu
#include <stdio.h> __global__ void initData(float *u, float *f, int N) { printf("Hello from initData: Thread (%d, %d), Block (%d,%d)\n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y); } __global__ void poissonKernel(float *u, float *u_new, float *f, int N) { printf("Hello from poissonKernel: Thread (%d, %d), Block (%d,%d)\n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y); } __global__ void computeError(float error, float *u, float *u_new, int N) { printf("Hello from computeError: Thread (%d, %d), Block (%d,%d)\n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y); } __global__ void updateSolution(float *u, float *u_new, int N) { printf("Hello from updateSolution: Thread (%d, %d), Block (%d,%d)\n",threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y); }
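// --- Editor's note, not a dataset record -------------------------------------------
// The four kernels in the pair above are printf stubs that only trace thread/block
// indices. Purely as an illustration of what a filled-in poissonKernel with the same
// signature could compute -- this is an assumption, not the original author's
// implementation -- a 2D Jacobi sweep for the discretised Poisson equation
// laplacian(u) = f on an N x N grid with unit spacing would be:
__global__ void poisson_jacobi_sketch(const float *u, float *u_new, const float *f, int N) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i > 0 && i < N - 1 && j > 0 && j < N - 1) {       // update interior points only
    u_new[j * N + i] = 0.25f * (u[j * N + i - 1] + u[j * N + i + 1] +
                                u[(j - 1) * N + i] + u[(j + 1) * N + i] -
                                f[j * N + i]);
  }
}
// ------------------------------------------------------------------------------------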
b1a3709bde3a109f3cb262e4a57caf03c5204fbc.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////// // // // standard headers plus new one defining tridiagonal solvers // // // //////////////////////////////////////////////////////////////////// #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "trid.h" #include "utilities.h" #define COLS 16 //////////////////////////////////////////////////////////////////// // // // error-checking utility // // // //////////////////////////////////////////////////////////////////// #define cudaSafeCall(err) __cudaSafeCall(err,__FILE__,__LINE__) inline void __cudaSafeCall(hipError_t err, const char *file, const int line){ if(hipSuccess != err) { printf("%s(%i) : cudaSafeCall() Runtime API error : %d %s.\n", file, line, err, hipGetErrorString(err) ); exit(-1); } } #define checkLastError() { \ hipError_t error = hipGetLastError(); \ int id; \ hipGetDevice(&id); \ if(error != hipSuccess) { \ printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \ __FILE__,__LINE__, hipGetErrorString(error), id); \ exit(EXIT_FAILURE); \ } \ } //////////////////////////////////////////////////////////////////// // // // explicit Black-Scholes finite difference kernels // // // //////////////////////////////////////////////////////////////////// // // linear extrapolation b.c. // template <int pad_left, int pad_total, typename REAL> __global__ void BS_bc1(int NX, int NY, int NZ, REAL *u1) { int t, i, j, k, indg, IOFF, JOFF, KOFF; t = threadIdx.x + blockIdx.x*blockDim.x; IOFF = 1; JOFF = NX+pad_total; KOFF = (NX+pad_total)*(NY+2); if (t<NX*NY) { i = t%NX; j = t/NX; k = NZ; indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF; u1[indg] = 2.0f*u1[indg-KOFF] - u1[indg-2*KOFF]; } else if (t<NX*NY + NY*NZ) { t = t - NX*NY; j = t%NY; k = t/NY; i = NX; indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF; u1[indg] = 2.0f*u1[indg-IOFF] - u1[indg-2*IOFF]; } else if (t<NX*NY + NY*NZ + NZ*NX) { t = t - NX*NY - NY*NZ; k = t%NZ; i = t/NZ; j = NY; indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF; u1[indg] = 2.0f*u1[indg-JOFF] - u1[indg-2*JOFF]; } } // // explicit solvers // template <int pad_left, int pad_total, typename REAL> __global__ void BS_explicit1(int NX, int NY, int NZ, REAL dS, REAL c1_1, REAL c1_2, REAL c1_3, REAL c2_1, REAL c2_2, REAL c2_3, REAL c3, REAL c12, REAL c13, REAL c23, const REAL* __restrict__ u1, REAL* __restrict__ u2) { REAL S1, S2, S3, t12, t13, t23; int i, j, k, indg, active, IOFF, JOFF, KOFF; i = threadIdx.x + blockIdx.x*blockDim.x; j = threadIdx.y + blockIdx.y*blockDim.y; indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2); IOFF = 1; JOFF = NX+pad_total; KOFF = (NX+pad_total)*(NY+2); active = (i<NX) && (j<NY); if (active) { for (k=0; k<NZ; k++) { S1 = ((REAL) i)*dS; S2 = ((REAL) j)*dS; S3 = ((REAL) k)*dS; t12 = c12*S1*S2; t13 = c13*S1*S3; t23 = c23*S2*S3; u2[indg] = t23 * u1[indg-KOFF-JOFF] + t13 * u1[indg-KOFF-IOFF] + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1[indg-KOFF] + t12 * u1[indg-JOFF-IOFF] + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1[indg-JOFF] + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1[indg-IOFF] + (1.0f - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3 - t12 - t13 - t23 ) ) * u1[indg] + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1[indg+IOFF] + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1[indg+JOFF] + t12 * u1[indg+JOFF+IOFF] + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1[indg+KOFF] + t13 * u1[indg+KOFF+IOFF] + t23 * u1[indg+KOFF+JOFF]; indg += KOFF; } } } template <int pad_left, int pad_total, typename 
REAL> __global__ void BS_explicit2(int NX, int NY, int NZ, REAL dS, REAL c1_1, REAL c1_2, REAL c1_3, REAL c2_1, REAL c2_2, REAL c2_3, REAL c3, REAL c12, REAL c13, REAL c23, const REAL* __restrict__ u1, REAL* __restrict__ u2) { REAL S1, S2, S3, t12, t13, t23; REAL u1_mm, u1_om, u1_mo, u1_m, u1_oo, u1_po, u1_op, u1_pp, u; int i, j, k, indg, active, IOFF, JOFF, KOFF; i = threadIdx.x + blockIdx.x*blockDim.x; j = threadIdx.y + blockIdx.y*blockDim.y; indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2); IOFF = 1; JOFF = NX+pad_total; KOFF = (NX+pad_total)*(NY+2); active = (i<NX) && (j<NY); if (active) { u1_om = u1[indg-KOFF-JOFF]; u1_mo = u1[indg-KOFF-IOFF]; u1_m = u1[indg-KOFF]; u1_oo = u1[indg]; u1_po = u1[indg+IOFF]; u1_op = u1[indg+JOFF]; for (k=0; k<NZ; k++) { S1 = ((REAL) i)*dS; S2 = ((REAL) j)*dS; S3 = ((REAL) k)*dS; t12 = c12*S1*S2; t13 = c13*S1*S3; t23 = c23*S2*S3; u = t23 * u1_om + t13 * u1_mo + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1_m; u1_mm = u1[indg-JOFF-IOFF]; u1_om = u1[indg-JOFF]; u1_mo = u1[indg-IOFF]; u1_pp = u1[indg+IOFF+JOFF]; u = u + t12 * u1_mm + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1_om + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1_mo + (1.0f - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3 - t12 - t13 - t23 ) ) * u1_oo + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1_po + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1_op + t12 * u1_pp; indg += KOFF; u1_m = u1_oo; u1_oo = u1[indg]; u1_po = u1[indg+IOFF]; u1_op = u1[indg+JOFF]; u = u + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1_oo + t13 * u1_po + t23 * u1_op; u2[indg-KOFF] = u; } } } template <int pad_left, int pad_total, typename REAL, typename REAL2> __launch_bounds__(256, 3) // (max 256 threads per block, min 3 blocks per SMX) __global__ void BS_explicit3(int NX, int NY, int NZ, REAL dS, REAL c1_1, REAL c1_2, REAL c1_3, REAL c2_1, REAL c2_2, REAL c2_3, REAL c3, REAL c12, REAL c13, REAL c23, const REAL2 * __restrict__ u1, REAL2 * __restrict__ u2) { REAL S1m, S1p, S2, S3, t12m, t12p, t13m, t13p, t23; int i, j, k, indg, active, JOFF, KOFF; REAL2 u1_mm, u1_om, u1_pm, u1_mp, u1_op, u1_pp, u; REAL u1_om_w, u1_mm_w, u1_pm_z, u1_op_z; i = threadIdx.x - 1 + blockIdx.x*(blockDim.x-2); j = threadIdx.y + blockIdx.y*blockDim.y; JOFF = (NX+pad_total)/2; KOFF = JOFF*(NY+2); indg = i + pad_left/2 + (j+1)*JOFF; active = (i<=NX/2) && (j<NY); if (active) { u1_mm = u1[indg-JOFF]; u1_om = u1[indg ]; u1_pm = u1[indg+JOFF]; indg += KOFF; u1_mp = u1[indg-JOFF]; u1_op = u1[indg ]; u1_pp = u1[indg+JOFF]; u1_om_w = __shfl_up (u1_om.y,1); u1_op_z = __shfl_down(u1_op.x,1); for (k=0; k<NZ; k++) { S1m = ((REAL) (2*i ))*dS; S1p = ((REAL) (2*i+1))*dS; S2 = ((REAL) j)*dS; S3 = ((REAL) k)*dS; t12m = c12*S2*S1m; t12p = c12*S2*S1p; t13m = c13*S3*S1m; t13p = c13*S3*S1p; t23 = c23*S2*S3; u.x = t23 * u1_mm.x + t13m * u1_om_w + (c1_3*S3*S3 - c2_3*S3 - t13m - t23) * u1_om.x; u.y = t23 * u1_mm.y + t13p * u1_om.x + (c1_3*S3*S3 - c2_3*S3 - t13p - t23) * u1_om.y; u1_mm = u1_mp; u1_om = u1_op; u1_pm = u1_pp; u1_mm_w = __shfl_up (u1_mm.y,1); // u1_mm_z = __shfl_down(u1_mm.x,1); u1_om_w = __shfl_up (u1_om.y,1); // u1_om_z = __shfl_down(u1_om.x,1); == u1_op_z // u1_pm_w = __shfl_up (u1_pm.y,1); u1_pm_z = __shfl_down(u1_pm.x,1); u.x = u.x + t12m * u1_mm_w + (c1_2*S2*S2 - c2_2*S2 - t12m - t23 ) * u1_mm.x + (c1_1*S1m*S1m - c2_1*S1m - t12m - t13m) * u1_om_w + (1.0f - c3 - 2.0f*( c1_1*S1m*S1m + c1_2*S2*S2 + c1_3*S3*S3 - t12m - t13m - t23 ) ) * u1_om.x + (c1_1*S1m*S1m + c2_1*S1m - t12m - t13m) * u1_om.y + (c1_2*S2*S2 + c2_2*S2 - t12m - t23 ) * u1_pm.x + t12m * u1_pm.y; u.y = u.y 
+ t12p * u1_mm.x + (c1_2*S2*S2 - c2_2*S2 - t12p - t23 ) * u1_mm.y + (c1_1*S1p*S1p - c2_1*S1p - t12p - t13p) * u1_om.x + (1.0f - c3 - 2.0f*( c1_1*S1p*S1p + c1_2*S2*S2 + c1_3*S3*S3 - t12p - t13p - t23 ) ) * u1_om.y + (c1_1*S1p*S1p + c2_1*S1p - t12p - t13p) * u1_op_z + (c1_2*S2*S2 + c2_2*S2 - t12p - t23 ) * u1_pm.y + t12p * u1_pm_z; indg += KOFF; u1_mp = u1[indg-JOFF]; u1_op = u1[indg ]; u1_pp = u1[indg+JOFF]; u1_op_z = __shfl_down(u1_op.x,1); u.x = u.x + (c1_3*S3*S3 + c2_3*S3 - t13m - t23) * u1_op.x + t13m * u1_op.y + t23 * u1_pp.x; u.y = u.y + (c1_3*S3*S3 + c2_3*S3 - t13p - t23) * u1_op.y + t13p * u1_op_z + t23 * u1_pp.y; if (threadIdx.x>0 && threadIdx.x<blockDim.x-1 && i<NX/2) u2[indg-KOFF] = u; } } } //////////////////////////////////////////////////////////////////// // // // implicit Black-Scholes finite difference kernels // // // //////////////////////////////////////////////////////////////////// template <int pad_left, int pad_total, typename REAL> __global__ void BS_implicit2_rhs(int NX, int NY, int NZ, REAL dS, REAL c1_1, REAL c1_2, REAL c1_3, REAL c2_1, REAL c2_2, REAL c2_3, REAL c3, REAL c12, REAL c13, REAL c23, const REAL* __restrict__ u1, REAL* __restrict__ u2) { REAL S1, S2, S3, t12, t13, t23; REAL u1_mm, u1_om, u1_mo, u1_m, u1_oo, u1_po, u1_op, u1_pp, u; int i, j, k, indg, active, IOFF, JOFF, KOFF; i = threadIdx.x + blockIdx.x*blockDim.x; j = threadIdx.y + blockIdx.y*blockDim.y; indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2); IOFF = 1; JOFF = NX+pad_total; KOFF = (NX+pad_total)*(NY+2); active = (i<NX) && (j<NY); if (active) { u1_om = u1[indg-KOFF-JOFF]; u1_mo = u1[indg-KOFF-IOFF]; u1_m = u1[indg-KOFF]; u1_oo = u1[indg]; u1_po = u1[indg+IOFF]; u1_op = u1[indg+JOFF]; for (k=0; k<NZ; k++) { S1 = ((REAL) i)*dS; S2 = ((REAL) j)*dS; S3 = ((REAL) k)*dS; t12 = c12*S1*S2; t13 = c13*S1*S3; t23 = c23*S2*S3; u = t23 * u1_om + t13 * u1_mo + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1_m; u1_mm = u1[indg-JOFF-IOFF]; u1_om = u1[indg-JOFF]; u1_mo = u1[indg-IOFF]; u1_pp = u1[indg+IOFF+JOFF]; u = u + t12 * u1_mm + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1_om + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1_mo + ( - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3 - t12 - t13 - t23 ) ) * u1_oo + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1_po + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1_op + t12 * u1_pp; indg += KOFF; u1_m = u1_oo; u1_oo = u1[indg]; u1_po = u1[indg+IOFF]; u1_op = u1[indg+JOFF]; u = u + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1_oo + t13 * u1_po + t23 * u1_op; u2[indg-KOFF] = u; } } } // // solves tridiagonal equations in x-direction, and increments solution // template <int pad_left, int pad_total, typename REAL> __global__ void BS_implicit2_x(int NX, int NY, int NZ, REAL dS, REAL c1, REAL c2, REAL c3, REAL* __restrict__ u, const REAL* __restrict__ rhs ) { volatile __shared__ REAL smem[(256+8)*4]; REAL S, lambda, gamma, a[8], b[8], c[8], d[8]; int j, k, tid; tid = threadIdx.x; j = threadIdx.y; k = blockIdx.x; rhs = rhs + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); u = u + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); for ( ; j<NY; j=j+4) { for (int i=0; i<8; i++) { S = (8*tid+i) * dS; lambda = c1*S*S; gamma = c2*S; a[i] = - ( lambda - gamma ); b[i] = 1.0f + c3 + 2.0f*lambda; c[i] = - ( lambda + gamma ); } if (tid==31) { a[7] = + 2.0f*gamma; b[7] = 1.0f + c3 - 2.0f*gamma; c[7] = 0.0f; } int off = threadIdx.y*(256+8); loadDataIntoRegisters_contig<8,32>(tid,256,d,smem+off,rhs,(REAL)0.0); trid_warp<8>(a,b,c,d); 
incDataFromRegisters_contig<8,32>(tid,256,d,smem+off,u); rhs = rhs + 4*(NX+pad_total); // increment pointers for next line u = u + 4*(NX+pad_total); } } // // solves tridiagonal equations in y-direction // template <int pad_left, int pad_total, typename REAL> __global__ void BS_implicit2_y(int NX, int NY, int NZ, REAL dS, REAL c1, REAL c2, REAL c3, REAL* __restrict__ u ) { __shared__ REAL s1[33*COLS], s2[33*COLS]; REAL S, lambda, gamma, a[8], b[8], c[8], d[8]; int i, j, k, tid, ind1, ind2; tid = threadIdx.x + threadIdx.y*COLS; ind1 = tid + (tid/32); ind2 = (tid/32) + (tid%32)*COLS; ind2 += ind2 / 32; i = threadIdx.x; j = 8*threadIdx.y; k = blockIdx.x; u = u + i + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); for (i=threadIdx.x; i<NX; i=i+COLS) { for (int n=0; n<8; n++) { S = (j+n) * dS; lambda = c1*S*S; gamma = c2*S; a[n] = - ( lambda - gamma ); b[n] = 1.0f + 2.0f*lambda; c[n] = - ( lambda + gamma ); d[n] = u[n*(NX+pad_total)]; } if (threadIdx.y==31) { a[7] = + 2.0f*gamma; b[7] = 1.0f - 2.0f*gamma; c[7] = 0.0f; } trid_warp_part1<8>(a,b,c,d); s1[ind1] = a[0]; s2[ind1] = a[7]; __syncthreads(); a[0] = s1[ind2]; a[7] = s2[ind2]; __syncthreads(); s1[ind1] = c[0]; s2[ind1] = c[7]; __syncthreads(); c[0] = s1[ind2]; c[7] = s2[ind2]; __syncthreads(); s1[ind1] = d[0]; s2[ind1] = d[7]; __syncthreads(); d[0] = s1[ind2]; d[7] = s2[ind2]; trid2_warp(a[0],c[0],d[0],a[7],c[7],d[7]); s1[ind2] = d[0]; s2[ind2] = d[7]; __syncthreads(); d[0] = s1[ind1]; d[7] = s2[ind1]; for (int n=1; n<7; n++) d[n] = d[n] - a[n]*d[0] - c[n]*d[7]; for (int n=0; n<8; n++) u[n*(NX+pad_total)] = d[n]; u = u + COLS; // increment pointers for next lines } } // // similar to BS_implicit2_y but solving in z-direction // template <int pad_left, int pad_total, typename REAL> __global__ void BS_implicit2_z(int NX, int NY, int NZ, REAL dS, REAL c1, REAL c2, REAL c3, REAL* __restrict__ u ) { __shared__ REAL s1[33*COLS], s2[33*COLS]; REAL S, lambda, gamma, a[8], b[8], c[8], d[8]; int i, j, k, tid, ind1, ind2; tid = threadIdx.x + threadIdx.y*COLS; ind1 = tid + (tid/32); ind2 = (tid/32) + (tid%32)*COLS; ind2 += ind2 / 32; i = threadIdx.x; j = blockIdx.x; // swapping j, k in these two lines k = 8*threadIdx.y; // is one difference from implicit2_y u = u + i + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); for (i=threadIdx.x; i<NX; i=i+COLS) { for (int n=0; n<8; n++) { S = (k+n) * dS; // changing j to k here is another lambda = c1*S*S; gamma = c2*S; a[n] = - ( lambda - gamma ); b[n] = 1.0f + 2.0f*lambda; c[n] = - ( lambda + gamma ); d[n] = u[n*(NX+pad_total)*(NY+2)]; // and a different offset here ... } if (threadIdx.y==31) { a[7] = + 2.0f*gamma; b[7] = 1.0f - 2.0f*gamma; c[7] = 0.0f; } trid_warp_part1<8>(a,b,c,d); s1[ind1] = a[0]; s2[ind1] = a[7]; __syncthreads(); a[0] = s1[ind2]; a[7] = s2[ind2]; __syncthreads(); s1[ind1] = c[0]; s2[ind1] = c[7]; __syncthreads(); c[0] = s1[ind2]; c[7] = s2[ind2]; __syncthreads(); s1[ind1] = d[0]; s2[ind1] = d[7]; __syncthreads(); d[0] = s1[ind2]; d[7] = s2[ind2]; trid2_warp(a[0],c[0],d[0],a[7],c[7],d[7]); s1[ind2] = d[0]; s2[ind2] = d[7]; __syncthreads(); d[0] = s1[ind1]; d[7] = s2[ind1]; for (int n=1; n<7; n++) d[n] = d[n] - a[n]*d[0] - c[n]*d[7]; for (int n=0; n<8; n++) u[n*(NX+pad_total)*(NY+2)] = d[n]; // ... 
and here u = u + COLS; // increment pointers for next lines } } //////////////////////////////////////////////////////////////////// // // // main code to test all solvers for single & double precision // // // //////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { int NX=256, NY=256, NZ=256, N, imid; float *u_h, *u1_d, *u2_d, *foo_d; double *U_h, *U1_d, *U2_d, *Foo_d, val, err; int pad_left, pad_total; // initialise CUDA timing float milli; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // allocate memory for arrays int prod = (NX+32)*(NY+2)*(NZ+2)+2; u_h = (float *)malloc(prod*sizeof(float)); U_h = (double *)malloc(prod*sizeof(double)); hipMalloc((void **)&u1_d, (prod+1)*sizeof(float)); hipMalloc((void **)&U1_d, (prod+1)*sizeof(double)); hipMalloc((void **)&u2_d, (prod+1)*sizeof(float)); hipMalloc((void **)&U2_d, (prod+1)*sizeof(double)); // execute kernels for (int prec=0; prec<2; prec++) { if (prec==0) { printf("\nsingle precision performance tests \n"); cudaSafeCall(hipDeviceSetSharedMemConfig(hipSharedMemBankSizeFourByte)); } else { printf("\ndouble precision performance tests \n"); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); } printf("---------------------------------- \n"); printf(" method exec time GFinsts GFlops value at strike \n"); for (int pass=0; pass<4; pass++) { pad_left = 32; pad_total = 32; if (pass<3) { N = 500; hipDeviceSetCacheConfig(hipFuncCachePreferL1); } else { N = 100; //hipDeviceSetCacheConfig(hipFuncCachePreferShared); hipFuncSetCacheConfig(BS_implicit2_x<32,32,double>, hipFuncCachePreferShared); hipFuncSetCacheConfig(BS_implicit2_y<32,32,double>, hipFuncCachePreferL1); hipFuncSetCacheConfig(BS_implicit2_z<32,32,double>, hipFuncCachePreferL1); hipFuncSetCacheConfig(BS_implicit2_rhs<32,32,double>, hipFuncCachePreferL1); hipFuncSetCacheConfig(BS_implicit2_x<32,32,float>, hipFuncCachePreferShared); hipFuncSetCacheConfig(BS_implicit2_y<32,32,float>, hipFuncCachePreferL1); hipFuncSetCacheConfig(BS_implicit2_z<32,32,float>, hipFuncCachePreferL1); hipFuncSetCacheConfig(BS_implicit2_rhs<32,32,float>, hipFuncCachePreferL1); } double Smax=200.0, K=100.0, r=0.05, sigma=0.2, T=0.05; double dS = Smax / 255.0; double dt = T / ( (double) N); double C1 = 0.5*dt*sigma*sigma / (dS*dS); double C2 = 0.5*dt*r / dS; double C3 = r*dt; float c1=C1, c2=C2, c3=C3, ds=dS; // initialise array (call on minimum of 3 assets) and copy over for (int i=-1; i<NX; i++) { for (int j=-1; j<NY; j++) { for (int k=-1; k<NZ; k++) { int indg = (i+pad_left) + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); // U_h[indg] = fmax(0.0, fmin(i*dS, fmin(j*dS,k*dS)) - K); U_h[indg] = fmax(0.0, i*dS-K); u_h[indg] = U_h[indg]; } } } if (prec==0) { hipMemcpy(u1_d,u_h, prod*sizeof(float) ,hipMemcpyHostToDevice); hipMemcpy(u2_d,u_h, prod*sizeof(float) ,hipMemcpyHostToDevice); } else { hipMemcpy(U1_d,U_h, prod*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(U2_d,U_h, prod*sizeof(double),hipMemcpyHostToDevice); } // now do main computation int BLOCK_X = 64; int BLOCK_Y = 4; int bc_threads = BLOCK_X*BLOCK_Y; int bc_blocks = 1 + (NX*NY + NY*NZ + NZ*NX - 1) / bc_threads; int bx = 1 + (NX-1)/BLOCK_X; int by = 1 + (NY-1)/BLOCK_Y; if (pass==2) { BLOCK_X = 32; BLOCK_Y = 8; bx = 1 + (NX/2-1)/(BLOCK_X-2); by = 1 + (NY-1)/BLOCK_Y; } dim3 threads(BLOCK_X,BLOCK_Y); dim3 blocks(bx,by); hipEventRecord(start); for (int n=1; n<=N; n++) { if (prec==0) { hipLaunchKernelGGL(( BS_bc1<32,32>), dim3(bc_blocks), dim3(bc_threads), 0, 0, NX,NY,NZ, 
u1_d); if (pass==0) hipLaunchKernelGGL(( BS_explicit1<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, ds, c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d); else if (pass==1) hipLaunchKernelGGL(( BS_explicit2<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, ds, c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d); else if (pass==2) hipLaunchKernelGGL(( BS_explicit3<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, ds, c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, (float2*)(u1_d), (float2*)(u2_d)); else if (pass==3) { hipLaunchKernelGGL(( BS_implicit2_rhs<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, ds, c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d); hipLaunchKernelGGL(( BS_implicit2_y<32,32>), dim3(NZ), dim3(dim3(COLS,32)), 0, 0, NX,NY,NZ, ds, c1,c2,c3, u2_d); hipLaunchKernelGGL(( BS_implicit2_z<32,32>), dim3(NY), dim3(dim3(COLS,32)), 0, 0, NX,NY,NZ, ds, c1,c2,c3, u2_d); hipLaunchKernelGGL(( BS_implicit2_x<32,32>), dim3(NZ), dim3(dim3(32,4)), 0, 0, NX,NY,NZ, ds, c1,c2,c3, u1_d, u2_d); } if (pass<3) {foo_d=u1_d; u1_d=u2_d; u2_d=foo_d;} // swap u1, u2 pointers } else { hipLaunchKernelGGL(( BS_bc1<32,32>), dim3(bc_blocks), dim3(bc_threads), 0, 0, NX,NY,NZ, U1_d); if (pass==0) hipLaunchKernelGGL(( BS_explicit1<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, dS, C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d); else if (pass==1) hipLaunchKernelGGL(( BS_explicit2<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, dS, C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d); else if (pass==2) hipLaunchKernelGGL(( BS_explicit3<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, dS, C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, (double2*)(U1_d), (double2*)(U2_d)); else if (pass==3) { hipLaunchKernelGGL(( BS_implicit2_rhs<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, dS, C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d); hipLaunchKernelGGL(( BS_implicit2_y<32,32>), dim3(NZ), dim3(dim3(COLS,32)), 0, 0, NX,NY,NZ, dS, C1,C2,C3, U2_d); hipLaunchKernelGGL(( BS_implicit2_z<32,32>), dim3(NY), dim3(dim3(COLS,32)), 0, 0, NX,NY,NZ, dS, C1,C2,C3, U2_d); hipLaunchKernelGGL(( BS_implicit2_x<32,32>), dim3(NZ), dim3(dim3(32,4)), 0, 0, NX,NY,NZ, dS, C1,C2,C3, U1_d, U2_d); } if (pass<3) {Foo_d=U1_d; U1_d=U2_d; U2_d=Foo_d;} // swap U1, U2 pointers } } hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milli, start, stop); // checkLastError(); // imid = (NX/2+1) + (NY/2+1)*(NX+2) + (NZ/2+1)*(NX+2)*(NY+2); imid = (NX/2+pad_left) + (NY/2+1)*(NX+pad_total) + (NZ/2+1)*(NX+pad_total)*(NY+2); if (prec==0) { hipMemcpy(u_h,u1_d,prod*sizeof(float), hipMemcpyDeviceToHost); for (int i=0; i<NX; i++) { val = u_h[i+pad_left+(NX+pad_total)+(NX+pad_total)*(NY+2)]; err = 0.0; for (int j=0; j<NY; j++) { for (int k=0; k<NZ; k++) { int ind = i+pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); err = fmax(err,fabs(val-u_h[ind])); // if (i==NX/2 && k==NX/2) printf(" %d %f \n",j,u_h[ind]-u_h[imid]); } } if (err > 1e-2) printf(" %d %f \n",i,err); } val = u_h[imid]; } else { hipMemcpy(U_h,U1_d,prod*sizeof(double), hipMemcpyDeviceToHost); for (int i=0; i<NX; i++) { val = u_h[i+pad_left+(NX+pad_total)+(NX+pad_total)*(NY+2)]; err = 0.0; for (int j=0; j<NY; j++) { for (int k=0; k<NZ; k++) { int ind = i+pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); err = fmax(err,fabs(val-u_h[ind])); } } if (err > 1e-8) printf(" %d %f \n",i,err); } val = U_h[imid]; } if (pass<3) printf("explicit%d %9.0f %38.6f \n",pass+1,milli,val); else printf("implicit%d %9.0f %38.6f 
\n",pass-1,milli,val); } } // CUDA exit -- needed to flush printf write buffer //cudaSafeCall(hipDeviceSynchronize()); //cudaSafeCall(hipDeviceReset()); return 0; }
b1a3709bde3a109f3cb262e4a57caf03c5204fbc.cu
//////////////////////////////////////////////////////////////////// // // // standard headers plus new one defining tridiagonal solvers // // // //////////////////////////////////////////////////////////////////// #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include "trid.h" #include "utilities.h" #define COLS 16 //////////////////////////////////////////////////////////////////// // // // error-checking utility // // // //////////////////////////////////////////////////////////////////// #define cudaSafeCall(err) __cudaSafeCall(err,__FILE__,__LINE__) inline void __cudaSafeCall(cudaError err, const char *file, const int line){ if(cudaSuccess != err) { printf("%s(%i) : cudaSafeCall() Runtime API error : %d %s.\n", file, line, err, cudaGetErrorString(err) ); exit(-1); } } #define checkLastError() { \ cudaError_t error = cudaGetLastError(); \ int id; \ cudaGetDevice(&id); \ if(error != cudaSuccess) { \ printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \ __FILE__,__LINE__, cudaGetErrorString(error), id); \ exit(EXIT_FAILURE); \ } \ } //////////////////////////////////////////////////////////////////// // // // explicit Black-Scholes finite difference kernels // // // //////////////////////////////////////////////////////////////////// // // linear extrapolation b.c. // template <int pad_left, int pad_total, typename REAL> __global__ void BS_bc1(int NX, int NY, int NZ, REAL *u1) { int t, i, j, k, indg, IOFF, JOFF, KOFF; t = threadIdx.x + blockIdx.x*blockDim.x; IOFF = 1; JOFF = NX+pad_total; KOFF = (NX+pad_total)*(NY+2); if (t<NX*NY) { i = t%NX; j = t/NX; k = NZ; indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF; u1[indg] = 2.0f*u1[indg-KOFF] - u1[indg-2*KOFF]; } else if (t<NX*NY + NY*NZ) { t = t - NX*NY; j = t%NY; k = t/NY; i = NX; indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF; u1[indg] = 2.0f*u1[indg-IOFF] - u1[indg-2*IOFF]; } else if (t<NX*NY + NY*NZ + NZ*NX) { t = t - NX*NY - NY*NZ; k = t%NZ; i = t/NZ; j = NY; indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF; u1[indg] = 2.0f*u1[indg-JOFF] - u1[indg-2*JOFF]; } } // // explicit solvers // template <int pad_left, int pad_total, typename REAL> __global__ void BS_explicit1(int NX, int NY, int NZ, REAL dS, REAL c1_1, REAL c1_2, REAL c1_3, REAL c2_1, REAL c2_2, REAL c2_3, REAL c3, REAL c12, REAL c13, REAL c23, const REAL* __restrict__ u1, REAL* __restrict__ u2) { REAL S1, S2, S3, t12, t13, t23; int i, j, k, indg, active, IOFF, JOFF, KOFF; i = threadIdx.x + blockIdx.x*blockDim.x; j = threadIdx.y + blockIdx.y*blockDim.y; indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2); IOFF = 1; JOFF = NX+pad_total; KOFF = (NX+pad_total)*(NY+2); active = (i<NX) && (j<NY); if (active) { for (k=0; k<NZ; k++) { S1 = ((REAL) i)*dS; S2 = ((REAL) j)*dS; S3 = ((REAL) k)*dS; t12 = c12*S1*S2; t13 = c13*S1*S3; t23 = c23*S2*S3; u2[indg] = t23 * u1[indg-KOFF-JOFF] + t13 * u1[indg-KOFF-IOFF] + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1[indg-KOFF] + t12 * u1[indg-JOFF-IOFF] + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1[indg-JOFF] + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1[indg-IOFF] + (1.0f - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3 - t12 - t13 - t23 ) ) * u1[indg] + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1[indg+IOFF] + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1[indg+JOFF] + t12 * u1[indg+JOFF+IOFF] + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1[indg+KOFF] + t13 * u1[indg+KOFF+IOFF] + t23 * u1[indg+KOFF+JOFF]; indg += KOFF; } } } template <int pad_left, int pad_total, typename REAL> __global__ void BS_explicit2(int NX, int NY, int NZ, REAL 
dS, REAL c1_1, REAL c1_2, REAL c1_3, REAL c2_1, REAL c2_2, REAL c2_3, REAL c3, REAL c12, REAL c13, REAL c23, const REAL* __restrict__ u1, REAL* __restrict__ u2) { REAL S1, S2, S3, t12, t13, t23; REAL u1_mm, u1_om, u1_mo, u1_m, u1_oo, u1_po, u1_op, u1_pp, u; int i, j, k, indg, active, IOFF, JOFF, KOFF; i = threadIdx.x + blockIdx.x*blockDim.x; j = threadIdx.y + blockIdx.y*blockDim.y; indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2); IOFF = 1; JOFF = NX+pad_total; KOFF = (NX+pad_total)*(NY+2); active = (i<NX) && (j<NY); if (active) { u1_om = u1[indg-KOFF-JOFF]; u1_mo = u1[indg-KOFF-IOFF]; u1_m = u1[indg-KOFF]; u1_oo = u1[indg]; u1_po = u1[indg+IOFF]; u1_op = u1[indg+JOFF]; for (k=0; k<NZ; k++) { S1 = ((REAL) i)*dS; S2 = ((REAL) j)*dS; S3 = ((REAL) k)*dS; t12 = c12*S1*S2; t13 = c13*S1*S3; t23 = c23*S2*S3; u = t23 * u1_om + t13 * u1_mo + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1_m; u1_mm = u1[indg-JOFF-IOFF]; u1_om = u1[indg-JOFF]; u1_mo = u1[indg-IOFF]; u1_pp = u1[indg+IOFF+JOFF]; u = u + t12 * u1_mm + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1_om + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1_mo + (1.0f - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3 - t12 - t13 - t23 ) ) * u1_oo + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1_po + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1_op + t12 * u1_pp; indg += KOFF; u1_m = u1_oo; u1_oo = u1[indg]; u1_po = u1[indg+IOFF]; u1_op = u1[indg+JOFF]; u = u + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1_oo + t13 * u1_po + t23 * u1_op; u2[indg-KOFF] = u; } } } template <int pad_left, int pad_total, typename REAL, typename REAL2> __launch_bounds__(256, 3) // (max 256 threads per block, min 3 blocks per SMX) __global__ void BS_explicit3(int NX, int NY, int NZ, REAL dS, REAL c1_1, REAL c1_2, REAL c1_3, REAL c2_1, REAL c2_2, REAL c2_3, REAL c3, REAL c12, REAL c13, REAL c23, const REAL2 * __restrict__ u1, REAL2 * __restrict__ u2) { REAL S1m, S1p, S2, S3, t12m, t12p, t13m, t13p, t23; int i, j, k, indg, active, JOFF, KOFF; REAL2 u1_mm, u1_om, u1_pm, u1_mp, u1_op, u1_pp, u; REAL u1_om_w, u1_mm_w, u1_pm_z, u1_op_z; i = threadIdx.x - 1 + blockIdx.x*(blockDim.x-2); j = threadIdx.y + blockIdx.y*blockDim.y; JOFF = (NX+pad_total)/2; KOFF = JOFF*(NY+2); indg = i + pad_left/2 + (j+1)*JOFF; active = (i<=NX/2) && (j<NY); if (active) { u1_mm = u1[indg-JOFF]; u1_om = u1[indg ]; u1_pm = u1[indg+JOFF]; indg += KOFF; u1_mp = u1[indg-JOFF]; u1_op = u1[indg ]; u1_pp = u1[indg+JOFF]; u1_om_w = __shfl_up (u1_om.y,1); u1_op_z = __shfl_down(u1_op.x,1); for (k=0; k<NZ; k++) { S1m = ((REAL) (2*i ))*dS; S1p = ((REAL) (2*i+1))*dS; S2 = ((REAL) j)*dS; S3 = ((REAL) k)*dS; t12m = c12*S2*S1m; t12p = c12*S2*S1p; t13m = c13*S3*S1m; t13p = c13*S3*S1p; t23 = c23*S2*S3; u.x = t23 * u1_mm.x + t13m * u1_om_w + (c1_3*S3*S3 - c2_3*S3 - t13m - t23) * u1_om.x; u.y = t23 * u1_mm.y + t13p * u1_om.x + (c1_3*S3*S3 - c2_3*S3 - t13p - t23) * u1_om.y; u1_mm = u1_mp; u1_om = u1_op; u1_pm = u1_pp; u1_mm_w = __shfl_up (u1_mm.y,1); // u1_mm_z = __shfl_down(u1_mm.x,1); u1_om_w = __shfl_up (u1_om.y,1); // u1_om_z = __shfl_down(u1_om.x,1); == u1_op_z // u1_pm_w = __shfl_up (u1_pm.y,1); u1_pm_z = __shfl_down(u1_pm.x,1); u.x = u.x + t12m * u1_mm_w + (c1_2*S2*S2 - c2_2*S2 - t12m - t23 ) * u1_mm.x + (c1_1*S1m*S1m - c2_1*S1m - t12m - t13m) * u1_om_w + (1.0f - c3 - 2.0f*( c1_1*S1m*S1m + c1_2*S2*S2 + c1_3*S3*S3 - t12m - t13m - t23 ) ) * u1_om.x + (c1_1*S1m*S1m + c2_1*S1m - t12m - t13m) * u1_om.y + (c1_2*S2*S2 + c2_2*S2 - t12m - t23 ) * u1_pm.x + t12m * u1_pm.y; u.y = u.y + t12p * u1_mm.x + (c1_2*S2*S2 - c2_2*S2 - t12p - t23 ) * 
u1_mm.y + (c1_1*S1p*S1p - c2_1*S1p - t12p - t13p) * u1_om.x + (1.0f - c3 - 2.0f*( c1_1*S1p*S1p + c1_2*S2*S2 + c1_3*S3*S3 - t12p - t13p - t23 ) ) * u1_om.y + (c1_1*S1p*S1p + c2_1*S1p - t12p - t13p) * u1_op_z + (c1_2*S2*S2 + c2_2*S2 - t12p - t23 ) * u1_pm.y + t12p * u1_pm_z; indg += KOFF; u1_mp = u1[indg-JOFF]; u1_op = u1[indg ]; u1_pp = u1[indg+JOFF]; u1_op_z = __shfl_down(u1_op.x,1); u.x = u.x + (c1_3*S3*S3 + c2_3*S3 - t13m - t23) * u1_op.x + t13m * u1_op.y + t23 * u1_pp.x; u.y = u.y + (c1_3*S3*S3 + c2_3*S3 - t13p - t23) * u1_op.y + t13p * u1_op_z + t23 * u1_pp.y; if (threadIdx.x>0 && threadIdx.x<blockDim.x-1 && i<NX/2) u2[indg-KOFF] = u; } } } //////////////////////////////////////////////////////////////////// // // // implicit Black-Scholes finite difference kernels // // // //////////////////////////////////////////////////////////////////// template <int pad_left, int pad_total, typename REAL> __global__ void BS_implicit2_rhs(int NX, int NY, int NZ, REAL dS, REAL c1_1, REAL c1_2, REAL c1_3, REAL c2_1, REAL c2_2, REAL c2_3, REAL c3, REAL c12, REAL c13, REAL c23, const REAL* __restrict__ u1, REAL* __restrict__ u2) { REAL S1, S2, S3, t12, t13, t23; REAL u1_mm, u1_om, u1_mo, u1_m, u1_oo, u1_po, u1_op, u1_pp, u; int i, j, k, indg, active, IOFF, JOFF, KOFF; i = threadIdx.x + blockIdx.x*blockDim.x; j = threadIdx.y + blockIdx.y*blockDim.y; indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2); IOFF = 1; JOFF = NX+pad_total; KOFF = (NX+pad_total)*(NY+2); active = (i<NX) && (j<NY); if (active) { u1_om = u1[indg-KOFF-JOFF]; u1_mo = u1[indg-KOFF-IOFF]; u1_m = u1[indg-KOFF]; u1_oo = u1[indg]; u1_po = u1[indg+IOFF]; u1_op = u1[indg+JOFF]; for (k=0; k<NZ; k++) { S1 = ((REAL) i)*dS; S2 = ((REAL) j)*dS; S3 = ((REAL) k)*dS; t12 = c12*S1*S2; t13 = c13*S1*S3; t23 = c23*S2*S3; u = t23 * u1_om + t13 * u1_mo + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1_m; u1_mm = u1[indg-JOFF-IOFF]; u1_om = u1[indg-JOFF]; u1_mo = u1[indg-IOFF]; u1_pp = u1[indg+IOFF+JOFF]; u = u + t12 * u1_mm + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1_om + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1_mo + ( - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3 - t12 - t13 - t23 ) ) * u1_oo + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1_po + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1_op + t12 * u1_pp; indg += KOFF; u1_m = u1_oo; u1_oo = u1[indg]; u1_po = u1[indg+IOFF]; u1_op = u1[indg+JOFF]; u = u + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1_oo + t13 * u1_po + t23 * u1_op; u2[indg-KOFF] = u; } } } // // solves tridiagonal equations in x-direction, and increments solution // template <int pad_left, int pad_total, typename REAL> __global__ void BS_implicit2_x(int NX, int NY, int NZ, REAL dS, REAL c1, REAL c2, REAL c3, REAL* __restrict__ u, const REAL* __restrict__ rhs ) { volatile __shared__ REAL smem[(256+8)*4]; REAL S, lambda, gamma, a[8], b[8], c[8], d[8]; int j, k, tid; tid = threadIdx.x; j = threadIdx.y; k = blockIdx.x; rhs = rhs + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); u = u + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); for ( ; j<NY; j=j+4) { for (int i=0; i<8; i++) { S = (8*tid+i) * dS; lambda = c1*S*S; gamma = c2*S; a[i] = - ( lambda - gamma ); b[i] = 1.0f + c3 + 2.0f*lambda; c[i] = - ( lambda + gamma ); } if (tid==31) { a[7] = + 2.0f*gamma; b[7] = 1.0f + c3 - 2.0f*gamma; c[7] = 0.0f; } int off = threadIdx.y*(256+8); loadDataIntoRegisters_contig<8,32>(tid,256,d,smem+off,rhs,(REAL)0.0); trid_warp<8>(a,b,c,d); incDataFromRegisters_contig<8,32>(tid,256,d,smem+off,u); rhs = rhs + 4*(NX+pad_total); // 
increment pointers for next line u = u + 4*(NX+pad_total); } } // // solves tridiagonal equations in y-direction // template <int pad_left, int pad_total, typename REAL> __global__ void BS_implicit2_y(int NX, int NY, int NZ, REAL dS, REAL c1, REAL c2, REAL c3, REAL* __restrict__ u ) { __shared__ REAL s1[33*COLS], s2[33*COLS]; REAL S, lambda, gamma, a[8], b[8], c[8], d[8]; int i, j, k, tid, ind1, ind2; tid = threadIdx.x + threadIdx.y*COLS; ind1 = tid + (tid/32); ind2 = (tid/32) + (tid%32)*COLS; ind2 += ind2 / 32; i = threadIdx.x; j = 8*threadIdx.y; k = blockIdx.x; u = u + i + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); for (i=threadIdx.x; i<NX; i=i+COLS) { for (int n=0; n<8; n++) { S = (j+n) * dS; lambda = c1*S*S; gamma = c2*S; a[n] = - ( lambda - gamma ); b[n] = 1.0f + 2.0f*lambda; c[n] = - ( lambda + gamma ); d[n] = u[n*(NX+pad_total)]; } if (threadIdx.y==31) { a[7] = + 2.0f*gamma; b[7] = 1.0f - 2.0f*gamma; c[7] = 0.0f; } trid_warp_part1<8>(a,b,c,d); s1[ind1] = a[0]; s2[ind1] = a[7]; __syncthreads(); a[0] = s1[ind2]; a[7] = s2[ind2]; __syncthreads(); s1[ind1] = c[0]; s2[ind1] = c[7]; __syncthreads(); c[0] = s1[ind2]; c[7] = s2[ind2]; __syncthreads(); s1[ind1] = d[0]; s2[ind1] = d[7]; __syncthreads(); d[0] = s1[ind2]; d[7] = s2[ind2]; trid2_warp(a[0],c[0],d[0],a[7],c[7],d[7]); s1[ind2] = d[0]; s2[ind2] = d[7]; __syncthreads(); d[0] = s1[ind1]; d[7] = s2[ind1]; for (int n=1; n<7; n++) d[n] = d[n] - a[n]*d[0] - c[n]*d[7]; for (int n=0; n<8; n++) u[n*(NX+pad_total)] = d[n]; u = u + COLS; // increment pointers for next lines } } // // similar to BS_implicit2_y but solving in z-direction // template <int pad_left, int pad_total, typename REAL> __global__ void BS_implicit2_z(int NX, int NY, int NZ, REAL dS, REAL c1, REAL c2, REAL c3, REAL* __restrict__ u ) { __shared__ REAL s1[33*COLS], s2[33*COLS]; REAL S, lambda, gamma, a[8], b[8], c[8], d[8]; int i, j, k, tid, ind1, ind2; tid = threadIdx.x + threadIdx.y*COLS; ind1 = tid + (tid/32); ind2 = (tid/32) + (tid%32)*COLS; ind2 += ind2 / 32; i = threadIdx.x; j = blockIdx.x; // swapping j, k in these two lines k = 8*threadIdx.y; // is one difference from implicit2_y u = u + i + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); for (i=threadIdx.x; i<NX; i=i+COLS) { for (int n=0; n<8; n++) { S = (k+n) * dS; // changing j to k here is another lambda = c1*S*S; gamma = c2*S; a[n] = - ( lambda - gamma ); b[n] = 1.0f + 2.0f*lambda; c[n] = - ( lambda + gamma ); d[n] = u[n*(NX+pad_total)*(NY+2)]; // and a different offset here ... } if (threadIdx.y==31) { a[7] = + 2.0f*gamma; b[7] = 1.0f - 2.0f*gamma; c[7] = 0.0f; } trid_warp_part1<8>(a,b,c,d); s1[ind1] = a[0]; s2[ind1] = a[7]; __syncthreads(); a[0] = s1[ind2]; a[7] = s2[ind2]; __syncthreads(); s1[ind1] = c[0]; s2[ind1] = c[7]; __syncthreads(); c[0] = s1[ind2]; c[7] = s2[ind2]; __syncthreads(); s1[ind1] = d[0]; s2[ind1] = d[7]; __syncthreads(); d[0] = s1[ind2]; d[7] = s2[ind2]; trid2_warp(a[0],c[0],d[0],a[7],c[7],d[7]); s1[ind2] = d[0]; s2[ind2] = d[7]; __syncthreads(); d[0] = s1[ind1]; d[7] = s2[ind1]; for (int n=1; n<7; n++) d[n] = d[n] - a[n]*d[0] - c[n]*d[7]; for (int n=0; n<8; n++) u[n*(NX+pad_total)*(NY+2)] = d[n]; // ... 
and here u = u + COLS; // increment pointers for next lines } } //////////////////////////////////////////////////////////////////// // // // main code to test all solvers for single & double precision // // // //////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { int NX=256, NY=256, NZ=256, N, imid; float *u_h, *u1_d, *u2_d, *foo_d; double *U_h, *U1_d, *U2_d, *Foo_d, val, err; int pad_left, pad_total; // initialise CUDA timing float milli; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // allocate memory for arrays int prod = (NX+32)*(NY+2)*(NZ+2)+2; u_h = (float *)malloc(prod*sizeof(float)); U_h = (double *)malloc(prod*sizeof(double)); cudaMalloc((void **)&u1_d, (prod+1)*sizeof(float)); cudaMalloc((void **)&U1_d, (prod+1)*sizeof(double)); cudaMalloc((void **)&u2_d, (prod+1)*sizeof(float)); cudaMalloc((void **)&U2_d, (prod+1)*sizeof(double)); // execute kernels for (int prec=0; prec<2; prec++) { if (prec==0) { printf("\nsingle precision performance tests \n"); cudaSafeCall(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte)); } else { printf("\ndouble precision performance tests \n"); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); } printf("---------------------------------- \n"); printf(" method exec time GFinsts GFlops value at strike \n"); for (int pass=0; pass<4; pass++) { pad_left = 32; pad_total = 32; if (pass<3) { N = 500; cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); } else { N = 100; //cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); cudaFuncSetCacheConfig(BS_implicit2_x<32,32,double>, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(BS_implicit2_y<32,32,double>, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(BS_implicit2_z<32,32,double>, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(BS_implicit2_rhs<32,32,double>, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(BS_implicit2_x<32,32,float>, cudaFuncCachePreferShared); cudaFuncSetCacheConfig(BS_implicit2_y<32,32,float>, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(BS_implicit2_z<32,32,float>, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(BS_implicit2_rhs<32,32,float>, cudaFuncCachePreferL1); } double Smax=200.0, K=100.0, r=0.05, sigma=0.2, T=0.05; double dS = Smax / 255.0; double dt = T / ( (double) N); double C1 = 0.5*dt*sigma*sigma / (dS*dS); double C2 = 0.5*dt*r / dS; double C3 = r*dt; float c1=C1, c2=C2, c3=C3, ds=dS; // initialise array (call on minimum of 3 assets) and copy over for (int i=-1; i<NX; i++) { for (int j=-1; j<NY; j++) { for (int k=-1; k<NZ; k++) { int indg = (i+pad_left) + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); // U_h[indg] = fmax(0.0, fmin(i*dS, fmin(j*dS,k*dS)) - K); U_h[indg] = fmax(0.0, i*dS-K); u_h[indg] = U_h[indg]; } } } if (prec==0) { cudaMemcpy(u1_d,u_h, prod*sizeof(float) ,cudaMemcpyHostToDevice); cudaMemcpy(u2_d,u_h, prod*sizeof(float) ,cudaMemcpyHostToDevice); } else { cudaMemcpy(U1_d,U_h, prod*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(U2_d,U_h, prod*sizeof(double),cudaMemcpyHostToDevice); } // now do main computation int BLOCK_X = 64; int BLOCK_Y = 4; int bc_threads = BLOCK_X*BLOCK_Y; int bc_blocks = 1 + (NX*NY + NY*NZ + NZ*NX - 1) / bc_threads; int bx = 1 + (NX-1)/BLOCK_X; int by = 1 + (NY-1)/BLOCK_Y; if (pass==2) { BLOCK_X = 32; BLOCK_Y = 8; bx = 1 + (NX/2-1)/(BLOCK_X-2); by = 1 + (NY-1)/BLOCK_Y; } dim3 threads(BLOCK_X,BLOCK_Y); dim3 blocks(bx,by); cudaEventRecord(start); for (int n=1; n<=N; n++) { if (prec==0) { BS_bc1<32,32><<<bc_blocks, bc_threads>>>(NX,NY,NZ, 
u1_d); if (pass==0) BS_explicit1<32,32><<<blocks, threads>>>(NX,NY,NZ, ds, c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d); else if (pass==1) BS_explicit2<32,32><<<blocks, threads>>>(NX,NY,NZ, ds, c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d); else if (pass==2) BS_explicit3<32,32><<<blocks, threads>>>(NX,NY,NZ, ds, c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, (float2*)(u1_d), (float2*)(u2_d)); else if (pass==3) { BS_implicit2_rhs<32,32><<<blocks, threads>>>(NX,NY,NZ, ds, c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d); BS_implicit2_y<32,32><<<NZ, dim3(COLS,32)>>>(NX,NY,NZ, ds, c1,c2,c3, u2_d); BS_implicit2_z<32,32><<<NY, dim3(COLS,32)>>>(NX,NY,NZ, ds, c1,c2,c3, u2_d); BS_implicit2_x<32,32><<<NZ, dim3(32,4)>>>(NX,NY,NZ, ds, c1,c2,c3, u1_d, u2_d); } if (pass<3) {foo_d=u1_d; u1_d=u2_d; u2_d=foo_d;} // swap u1, u2 pointers } else { BS_bc1<32,32><<<bc_blocks, bc_threads>>>(NX,NY,NZ, U1_d); if (pass==0) BS_explicit1<32,32><<<blocks, threads>>>(NX,NY,NZ, dS, C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d); else if (pass==1) BS_explicit2<32,32><<<blocks, threads>>>(NX,NY,NZ, dS, C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d); else if (pass==2) BS_explicit3<32,32><<<blocks, threads>>>(NX,NY,NZ, dS, C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, (double2*)(U1_d), (double2*)(U2_d)); else if (pass==3) { BS_implicit2_rhs<32,32><<<blocks, threads>>>(NX,NY,NZ, dS, C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d); BS_implicit2_y<32,32><<<NZ, dim3(COLS,32)>>>(NX,NY,NZ, dS, C1,C2,C3, U2_d); BS_implicit2_z<32,32><<<NY, dim3(COLS,32)>>>(NX,NY,NZ, dS, C1,C2,C3, U2_d); BS_implicit2_x<32,32><<<NZ, dim3(32,4)>>>(NX,NY,NZ, dS, C1,C2,C3, U1_d, U2_d); } if (pass<3) {Foo_d=U1_d; U1_d=U2_d; U2_d=Foo_d;} // swap U1, U2 pointers } } cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milli, start, stop); // checkLastError(); // imid = (NX/2+1) + (NY/2+1)*(NX+2) + (NZ/2+1)*(NX+2)*(NY+2); imid = (NX/2+pad_left) + (NY/2+1)*(NX+pad_total) + (NZ/2+1)*(NX+pad_total)*(NY+2); if (prec==0) { cudaMemcpy(u_h,u1_d,prod*sizeof(float), cudaMemcpyDeviceToHost); for (int i=0; i<NX; i++) { val = u_h[i+pad_left+(NX+pad_total)+(NX+pad_total)*(NY+2)]; err = 0.0; for (int j=0; j<NY; j++) { for (int k=0; k<NZ; k++) { int ind = i+pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); err = fmax(err,fabs(val-u_h[ind])); // if (i==NX/2 && k==NX/2) printf(" %d %f \n",j,u_h[ind]-u_h[imid]); } } if (err > 1e-2) printf(" %d %f \n",i,err); } val = u_h[imid]; } else { cudaMemcpy(U_h,U1_d,prod*sizeof(double), cudaMemcpyDeviceToHost); for (int i=0; i<NX; i++) { val = u_h[i+pad_left+(NX+pad_total)+(NX+pad_total)*(NY+2)]; err = 0.0; for (int j=0; j<NY; j++) { for (int k=0; k<NZ; k++) { int ind = i+pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2); err = fmax(err,fabs(val-u_h[ind])); } } if (err > 1e-8) printf(" %d %f \n",i,err); } val = U_h[imid]; } if (pass<3) printf("explicit%d %9.0f %38.6f \n",pass+1,milli,val); else printf("implicit%d %9.0f %38.6f \n",pass-1,milli,val); } } // CUDA exit -- needed to flush printf write buffer //cudaSafeCall(cudaThreadSynchronize()); //cudaSafeCall(cudaDeviceReset()); return 0; }
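////////////////////////////////////////////////////////////////////
// editor's sketch (not part of the original file):
// the main loop above advances the solution one explicit step at a
// time and then swaps the u1/u2 device pointers.  The minimal example
// below isolates that double-buffer pattern with a toy 1D stencil;
// the kernel name "step" and its coefficients are illustrative
// assumptions, not the Black-Scholes operator used above.
////////////////////////////////////////////////////////////////////

#include <cuda_runtime.h>

__global__ void step(int n, const float* __restrict__ in, float* __restrict__ out)
{
  int i = threadIdx.x + blockIdx.x*blockDim.x;
  if (i>0 && i<n-1)
    out[i] = 0.25f*in[i-1] + 0.5f*in[i] + 0.25f*in[i+1];   // toy averaging stencil
}

void timestep(float* d_u1, float* d_u2, int n, int nsteps)
{
  dim3 block(256), grid((n+255)/256);
  for (int s=0; s<nsteps; ++s) {
    step<<<grid, block>>>(n, d_u1, d_u2);                  // d_u2 <- F(d_u1)
    float* tmp = d_u1; d_u1 = d_u2; d_u2 = tmp;            // swap roles, as in the main loop above
  }
  cudaDeviceSynchronize();                                 // final result sits in the current d_u1
}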
b380fc5ccb4abbbd1fa4f2d253eeb5f40d88d93b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #include "support.h" #include "kernel.hip" int main(int argc, char* argv[]) { Timer timer; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); Matrix M_h, N_h, P_h; // M: filter, N: input image, P: output image Matrix N_d, P_d; unsigned imageHeight, imageWidth; hipError_t cuda_ret; dim3 dim_grid, dim_block; /* Read image dimensions */ if (argc == 1) { imageHeight = 600; imageWidth = 1000; } else if (argc == 2) { imageHeight = atoi(argv[1]); imageWidth = atoi(argv[1]); } else if (argc == 3) { imageHeight = atoi(argv[1]); imageWidth = atoi(argv[2]); } else { printf("\n Invalid input parameters!" "\n Usage: ./convolution # Image is 600 x 1000" "\n Usage: ./convolution <m> # Image is m x m" "\n Usage: ./convolution <m> <n> # Image is m x n" "\n"); exit(0); } /* Allocate host memory */ M_h = allocateMatrix(FILTER_SIZE, FILTER_SIZE); N_h = allocateMatrix(imageHeight, imageWidth); P_h = allocateMatrix(imageHeight, imageWidth); /* Initialize filter and images */ initMatrix(M_h); initMatrix(N_h); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf(" Image: %u x %u\n", imageHeight, imageWidth); printf(" Mask: %u x %u\n", FILTER_SIZE, FILTER_SIZE); // Allocate device variables ---------------------------------------------- printf("Allocating device variables..."); fflush(stdout); startTime(&timer); N_d = allocateDeviceMatrix(imageHeight, imageWidth); P_d = allocateDeviceMatrix(imageHeight, imageWidth); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); /* Copy image to device global memory */ copyToDeviceMatrix(N_d, N_h); /* Copy mask to device constant memory */ cuda_ret = hipMemcpyToSymbol(M_c, M_h.elements, M_h.height*M_h.width * sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to copy to constant memory"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel ---------------------------------------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); dim_block.x = BLOCK_SIZE; dim_block.y = BLOCK_SIZE; dim_block.z = 1; dim_grid.x = imageWidth/TILE_SIZE; if(imageWidth%TILE_SIZE != 0) dim_grid.x++; dim_grid.y = imageHeight/TILE_SIZE; if(imageHeight%TILE_SIZE != 0) dim_grid.y++; dim_grid.z = 1; hipLaunchKernelGGL(( convolution), dim3(dim_grid), dim3(dim_block), 0, 0, N_d, P_d); cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); copyFromDeviceMatrix(P_h, P_d); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Verify correctness ----------------------------------------------------- printf("Verifying results..."); fflush(stdout); verify(M_h, 
N_h, P_h); // Free memory ------------------------------------------------------------ freeMatrix(M_h); freeMatrix(N_h); freeMatrix(P_h); freeDeviceMatrix(N_d); freeDeviceMatrix(P_d); return 0; }
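// editor's note (not part of the original file): hipLaunchKernelGGL above is
// the hipify spelling of the CUDA triple-chevron launch used in the .cu
// counterpart below.  The general correspondence is
//
//   kernel<<<gridDim, blockDim, sharedMemBytes, stream>>>(args...);      // CUDA
//   hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes,
//                      stream, args...);                                 // HIP
//
// so the "0, 0" in the converted call is just the default dynamic shared
// memory size and the default (null) stream.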
b380fc5ccb4abbbd1fa4f2d253eeb5f40d88d93b.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #include "support.h" #include "kernel.cu" int main(int argc, char* argv[]) { Timer timer; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); Matrix M_h, N_h, P_h; // M: filter, N: input image, P: output image Matrix N_d, P_d; unsigned imageHeight, imageWidth; cudaError_t cuda_ret; dim3 dim_grid, dim_block; /* Read image dimensions */ if (argc == 1) { imageHeight = 600; imageWidth = 1000; } else if (argc == 2) { imageHeight = atoi(argv[1]); imageWidth = atoi(argv[1]); } else if (argc == 3) { imageHeight = atoi(argv[1]); imageWidth = atoi(argv[2]); } else { printf("\n Invalid input parameters!" "\n Usage: ./convolution # Image is 600 x 1000" "\n Usage: ./convolution <m> # Image is m x m" "\n Usage: ./convolution <m> <n> # Image is m x n" "\n"); exit(0); } /* Allocate host memory */ M_h = allocateMatrix(FILTER_SIZE, FILTER_SIZE); N_h = allocateMatrix(imageHeight, imageWidth); P_h = allocateMatrix(imageHeight, imageWidth); /* Initialize filter and images */ initMatrix(M_h); initMatrix(N_h); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf(" Image: %u x %u\n", imageHeight, imageWidth); printf(" Mask: %u x %u\n", FILTER_SIZE, FILTER_SIZE); // Allocate device variables ---------------------------------------------- printf("Allocating device variables..."); fflush(stdout); startTime(&timer); N_d = allocateDeviceMatrix(imageHeight, imageWidth); P_d = allocateDeviceMatrix(imageHeight, imageWidth); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); /* Copy image to device global memory */ copyToDeviceMatrix(N_d, N_h); /* Copy mask to device constant memory */ cuda_ret = cudaMemcpyToSymbol(M_c, M_h.elements, M_h.height*M_h.width * sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to copy to constant memory"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel ---------------------------------------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); dim_block.x = BLOCK_SIZE; dim_block.y = BLOCK_SIZE; dim_block.z = 1; dim_grid.x = imageWidth/TILE_SIZE; if(imageWidth%TILE_SIZE != 0) dim_grid.x++; dim_grid.y = imageHeight/TILE_SIZE; if(imageHeight%TILE_SIZE != 0) dim_grid.y++; dim_grid.z = 1; convolution<<<dim_grid, dim_block>>>(N_d, P_d); cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); copyFromDeviceMatrix(P_h, P_d); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Verify correctness ----------------------------------------------------- printf("Verifying results..."); fflush(stdout); verify(M_h, N_h, P_h); // Free memory ------------------------------------------------------------ freeMatrix(M_h); 
freeMatrix(N_h); freeMatrix(P_h); freeDeviceMatrix(N_d); freeDeviceMatrix(P_d); return 0; }
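// editor's sketch (not part of the original file): the grid sizing in main()
// divides the image extent by TILE_SIZE and bumps the count when there is a
// remainder, i.e. a ceiling division.  The helper below is the equivalent
// one-liner form; TILE_SIZE is defined in kernel.cu in the original project,
// so the fallback value here is only a placeholder for illustration.

#include <cuda_runtime.h>

#ifndef TILE_SIZE
#define TILE_SIZE 12   // assumed placeholder; the real value comes from kernel.cu
#endif

static dim3 convolutionGrid(unsigned imageHeight, unsigned imageWidth)
{
    dim3 grid((imageWidth  + TILE_SIZE - 1) / TILE_SIZE,   // width/TILE_SIZE, rounded up
              (imageHeight + TILE_SIZE - 1) / TILE_SIZE,   // height/TILE_SIZE, rounded up
              1);
    return grid;
}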
238e33642260ccecb5f28fb0f0a1e2cb17c6df6b.hip
// !!! This is a file automatically generated by hipify!!! #include "main-pr.hpp" #define THROW_AWAY 0 #include "Padded2DArray.hpp" #include <omp.h> #include "memutils.hpp" #include <cmath> //#define SHOWLOADBALANCE //#define LOG #ifdef LOG #include "logged_array.hpp" #endif #include <hip/hip_runtime_api.h> #include <cusparse_v2.h> #include <rocblas.h> #include "helper_cuda.h" #include "LightSpMV_interface.hpp" template <typename VertexType, typename EdgeType, typename Scalar> int main_pr(VertexType nVtx, EdgeType* xadj_, VertexType *adj_, Scalar* val_, Scalar *prior_, Scalar* pr_, Scalar lambda, int nTry, //algo parameter util::timestamp& totaltime, std::string& ) { bool coldcache = true; util::timestamp start(0,0); //cpuside variables Scalar* prin_ = new Scalar[nVtx]; EdgeType* xadj = xadj_; VertexType *adj = adj_; Scalar* val = val_; Scalar* prior = prior_; Scalar* prin = prin_; Scalar* prout = pr_; Scalar alpha = lambda; Scalar beta = 1-lambda; //cuda side variable EdgeType* d_xadj ; VertexType *d_adj ; Scalar* d_val ; Scalar* d_prior ; Scalar* d_prin ; Scalar* d_prout ; // Scalar *d_alpha; //Scalar *d_beta; /* Get handle to the CUBLAS context */ hipblasHandle_t cublasHandle = 0; hipblasStatus_t cublasStatus; cublasStatus = hipblasCreate(&cublasHandle); /* Get handle to the CUSPARSE context */ hipsparseHandle_t cusparseHandle = 0; hipsparseStatus_t cusparseStatus; cusparseStatus = hipsparseCreate(&cusparseHandle); hipsparseMatDescr_t descr = 0; cusparseStatus = hipsparseCreateMatDescr(&descr); hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO); //memalloc checkCudaErrors( hipMalloc((void**)&d_xadj, (nVtx+1)*sizeof(*xadj)) ); checkCudaErrors( hipMalloc((void**)&d_adj, (xadj[nVtx])*sizeof(*adj)) ); checkCudaErrors( hipMalloc((void**)&d_val, (xadj[nVtx])*sizeof(*val)) ); checkCudaErrors( hipMalloc((void**)&d_prior, (nVtx*sizeof(*prior)))); checkCudaErrors( hipMalloc((void**)&d_prin, (nVtx*sizeof(*prin)) )); checkCudaErrors( hipMalloc((void**)&d_prout, (nVtx*sizeof(*prout)) )); //cpu to gpu copies checkCudaErrors( hipMemcpy(d_xadj, xadj, (nVtx+1)*sizeof(*xadj), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_adj, adj, (xadj[nVtx])*sizeof(*adj), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_val, val, (xadj[nVtx])*sizeof(*val), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_prior, prior, nVtx*sizeof(*prior), hipMemcpyHostToDevice) ); lightSpMVCSRKernel lspmv; for (int TRY=0; TRY<THROW_AWAY+nTry; ++TRY) { if (TRY >= THROW_AWAY) start = util::timestamp(); for (int iter = 0; iter < 40 ; ++ iter) { //setup prin if (iter == 0) //std::copy (prior, prior+nVtx, prin); checkCudaErrors(hipMemcpy(d_prin, d_prior, nVtx*sizeof(*prior), hipMemcpyDeviceToDevice)); else //std::copy (prout, prout+nVtx, prin); checkCudaErrors(hipMemcpy(d_prin, d_prout, nVtx*sizeof(*prout), hipMemcpyDeviceToDevice)); Scalar eps = 0.; //prout = A prin //prout = lambda * prout + (1-lambda) prior checkCudaErrors(hipMemcpy(d_prout, d_prior, nVtx*sizeof(*prior), hipMemcpyDeviceToDevice)); //for float it is S. 
//does prout = alpha A prin + beta prout // cusparseStatus = hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, // nVtx, nVtx, xadj[nVtx], &alpha, // descr, // d_val, d_xadj, d_adj, // d_prin, &beta, // d_prout); // hipDeviceSynchronize(); lspmv.spmvBLAS(nVtx, nVtx, xadj[nVtx], (uint32_t*)d_xadj, (uint32_t*)d_adj, d_val, d_prin, d_prout, alpha, beta, 0); hipDeviceSynchronize(); //compute epsilon //using prin to compute epsilon float epsalpha = -1; cublasStatus = hipblasSaxpy (cublasHandle, nVtx, &epsalpha, d_prout, 1, d_prin, 1); // d_prin = d_prout*-1 + d_prin if (cublasStatus != HIPBLAS_STATUS_SUCCESS) std::cerr<<"err"<<std::endl; cublasStatus = hipblasSasum(cublasHandle, nVtx, d_prin, 1, &eps); if (cublasStatus != HIPBLAS_STATUS_SUCCESS) std::cerr<<"err"<<std::endl; //stopping condition if (eps < 0) // deactivited for testing purposes iter = 20; std::cerr<<eps<<std::endl; } checkCudaErrors(hipMemcpy(prout, d_prout, nVtx*sizeof(*prout), hipMemcpyDeviceToHost)); std::cerr<<"PR[0]="<<prout[0]<<std::endl; if (TRY >= THROW_AWAY) { util::timestamp stop; totaltime += stop - start; } #ifndef LOG if (coldcache) { #pragma omp parallel { evict_array_from_cache(adj, xadj[nVtx]*sizeof(*adj)); evict_array_from_cache(xadj, (nVtx+1)*sizeof(*xadj)); evict_array_from_cache(val, xadj[nVtx]*sizeof(*val)); evict_array_from_cache(prior, nVtx*sizeof(*prior)); evict_array_from_cache(prin, nVtx*sizeof(*prin)); evict_array_from_cache(prout, nVtx*sizeof(*prout)); #pragma omp barrier } } #endif } #ifdef SHOWLOADBALANCE std::cout<<"load balance"<<std::endl; for (int i=0; i< 244; ++i) std::cout<<count[i]<<std::endl; #endif delete[] prin_; return 0; }
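// editor's note (not part of the original file): this auto-converted unit
// calls hipBLAS/hipSPARSE entry points (hipblasCreate, hipblasSaxpy,
// hipblasSasum, hipsparseCreate, ...) but still includes <cusparse_v2.h>
// and maps cuBLAS to <rocblas.h>, neither of which declares those symbols.
// A ROCm build would most likely need the wrapper headers instead; exact
// header paths vary by ROCm version, so treat this as an assumption rather
// than a verified fix.

#include <hip/hip_runtime_api.h>
#include <hipblas.h>     // hipblasHandle_t, hipblasCreate, hipblasSaxpy, hipblasSasum
#include <hipsparse.h>   // hipsparseHandle_t, hipsparseCreate, hipsparseCreateMatDescr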
238e33642260ccecb5f28fb0f0a1e2cb17c6df6b.cu
#include "main-pr.hpp" #define THROW_AWAY 0 #include "Padded2DArray.hpp" #include <omp.h> #include "memutils.hpp" #include <cmath> //#define SHOWLOADBALANCE //#define LOG #ifdef LOG #include "logged_array.hpp" #endif #include <cuda_runtime_api.h> #include <cusparse_v2.h> #include <cublas_v2.h> #include "helper_cuda.h" #include "LightSpMV_interface.hpp" template <typename VertexType, typename EdgeType, typename Scalar> int main_pr(VertexType nVtx, EdgeType* xadj_, VertexType *adj_, Scalar* val_, Scalar *prior_, Scalar* pr_, Scalar lambda, int nTry, //algo parameter util::timestamp& totaltime, std::string& ) { bool coldcache = true; util::timestamp start(0,0); //cpuside variables Scalar* prin_ = new Scalar[nVtx]; EdgeType* xadj = xadj_; VertexType *adj = adj_; Scalar* val = val_; Scalar* prior = prior_; Scalar* prin = prin_; Scalar* prout = pr_; Scalar alpha = lambda; Scalar beta = 1-lambda; //cuda side variable EdgeType* d_xadj ; VertexType *d_adj ; Scalar* d_val ; Scalar* d_prior ; Scalar* d_prin ; Scalar* d_prout ; // Scalar *d_alpha; //Scalar *d_beta; /* Get handle to the CUBLAS context */ cublasHandle_t cublasHandle = 0; cublasStatus_t cublasStatus; cublasStatus = cublasCreate(&cublasHandle); /* Get handle to the CUSPARSE context */ cusparseHandle_t cusparseHandle = 0; cusparseStatus_t cusparseStatus; cusparseStatus = cusparseCreate(&cusparseHandle); cusparseMatDescr_t descr = 0; cusparseStatus = cusparseCreateMatDescr(&descr); cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO); //memalloc checkCudaErrors( cudaMalloc((void**)&d_xadj, (nVtx+1)*sizeof(*xadj)) ); checkCudaErrors( cudaMalloc((void**)&d_adj, (xadj[nVtx])*sizeof(*adj)) ); checkCudaErrors( cudaMalloc((void**)&d_val, (xadj[nVtx])*sizeof(*val)) ); checkCudaErrors( cudaMalloc((void**)&d_prior, (nVtx*sizeof(*prior)))); checkCudaErrors( cudaMalloc((void**)&d_prin, (nVtx*sizeof(*prin)) )); checkCudaErrors( cudaMalloc((void**)&d_prout, (nVtx*sizeof(*prout)) )); //cpu to gpu copies checkCudaErrors( cudaMemcpy(d_xadj, xadj, (nVtx+1)*sizeof(*xadj), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_adj, adj, (xadj[nVtx])*sizeof(*adj), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_val, val, (xadj[nVtx])*sizeof(*val), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_prior, prior, nVtx*sizeof(*prior), cudaMemcpyHostToDevice) ); lightSpMVCSRKernel lspmv; for (int TRY=0; TRY<THROW_AWAY+nTry; ++TRY) { if (TRY >= THROW_AWAY) start = util::timestamp(); for (int iter = 0; iter < 40 ; ++ iter) { //setup prin if (iter == 0) //std::copy (prior, prior+nVtx, prin); checkCudaErrors(cudaMemcpy(d_prin, d_prior, nVtx*sizeof(*prior), cudaMemcpyDeviceToDevice)); else //std::copy (prout, prout+nVtx, prin); checkCudaErrors(cudaMemcpy(d_prin, d_prout, nVtx*sizeof(*prout), cudaMemcpyDeviceToDevice)); Scalar eps = 0.; //prout = A prin //prout = lambda * prout + (1-lambda) prior checkCudaErrors(cudaMemcpy(d_prout, d_prior, nVtx*sizeof(*prior), cudaMemcpyDeviceToDevice)); //for float it is S. 
//does prout = alpha A prin + beta prout // cusparseStatus = cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, // nVtx, nVtx, xadj[nVtx], &alpha, // descr, // d_val, d_xadj, d_adj, // d_prin, &beta, // d_prout); // cudaThreadSynchronize(); lspmv.spmvBLAS(nVtx, nVtx, xadj[nVtx], (uint32_t*)d_xadj, (uint32_t*)d_adj, d_val, d_prin, d_prout, alpha, beta, 0); cudaThreadSynchronize(); //compute epsilon //using prin to compute epsilon float epsalpha = -1; cublasStatus = cublasSaxpy (cublasHandle, nVtx, &epsalpha, d_prout, 1, d_prin, 1); // d_prin = d_prout*-1 + d_prin if (cublasStatus != CUBLAS_STATUS_SUCCESS) std::cerr<<"err"<<std::endl; cublasStatus = cublasSasum(cublasHandle, nVtx, d_prin, 1, &eps); if (cublasStatus != CUBLAS_STATUS_SUCCESS) std::cerr<<"err"<<std::endl; //stopping condition if (eps < 0) // deactivited for testing purposes iter = 20; std::cerr<<eps<<std::endl; } checkCudaErrors(cudaMemcpy(prout, d_prout, nVtx*sizeof(*prout), cudaMemcpyDeviceToHost)); std::cerr<<"PR[0]="<<prout[0]<<std::endl; if (TRY >= THROW_AWAY) { util::timestamp stop; totaltime += stop - start; } #ifndef LOG if (coldcache) { #pragma omp parallel { evict_array_from_cache(adj, xadj[nVtx]*sizeof(*adj)); evict_array_from_cache(xadj, (nVtx+1)*sizeof(*xadj)); evict_array_from_cache(val, xadj[nVtx]*sizeof(*val)); evict_array_from_cache(prior, nVtx*sizeof(*prior)); evict_array_from_cache(prin, nVtx*sizeof(*prin)); evict_array_from_cache(prout, nVtx*sizeof(*prout)); #pragma omp barrier } } #endif } #ifdef SHOWLOADBALANCE std::cout<<"load balance"<<std::endl; for (int i=0; i< 244; ++i) std::cout<<count[i]<<std::endl; #endif delete[] prin_; return 0; }
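// editor's sketch (not part of the original file): the convergence test in
// the iteration above computes eps = || pr_in - pr_out ||_1 by overwriting
// pr_in with pr_in - pr_out (SAXPY with alpha = -1) and then taking an
// absolute-value sum (SASUM).  The standalone helper below shows just that
// pattern; the function name is an illustrative assumption.

#include <cublas_v2.h>

float l1_change(cublasHandle_t handle, int n, const float* d_new, float* d_old)
{
  const float minus_one = -1.0f;
  float eps = 0.0f;
  cublasSaxpy(handle, n, &minus_one, d_new, 1, d_old, 1);  // d_old <- d_old - d_new
  cublasSasum(handle, n, d_old, 1, &eps);                  // eps   <- sum(|d_old|)
  return eps;                                              // note: d_old is clobbered
}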
cbfbd83c8a6c17cf5788d425fda9cd9dd05266c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "CONV_cuBLAS.cuh" __global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float* data_col) { CUDA_KERNEL_LOOP(index, n) { int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ksize; ++i) { for (int j = 0; j < ksize; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; } } } } //__global__ void im2col_gpu_kernel(const int n, const float* data_im, // const int height, const int width, const int ksize, const int pad, // const int stride, const int height_col, const int width_col, // float* data_col) { // CUDA_KERNEL_LOOP(op_idx, n) { // int index = op_idx; // int w_out = index % width_col; // // index /= width_col; // int h_out = index % height_col; // int channel_in = index / height_col; // int channel_out = channel_in * ksize * ksize; // int h_in = h_out * stride - pad; // int w_in = w_out * stride - pad; // // float* temp_col = data_col+ (channel_out * height_col + h_out) * width_col + w_out; // const float* temp_img = data_im + (channel_in * height + h_in) * width + w_in; // // for (int i = 0; i < ksize; ++i) { // for (int j = 0; j < ksize; ++j) { // int h = h_in + i; // int w = w_in + j; // *temp_col = (h >= 0 && w >= 0 && h < height && w < width) ? // temp_img[i * width + j] : 0; // temp_col += height_col * width_col; // } // } // } //} void im2col_gpu(const float* data_im, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, float* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, height, width, ksize, pad, stride, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } /* // Explicit instantiation template void im2col_gpu<float>(const float* data_im, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, float* data_col); template void im2col_gpu<double>(const double* data_im, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, double* data_col); */ // Helper function for using CUDA to add vectors in parallel. 
//const float* data_im // raw data, //const int channels // image channels //const int height //image height //const int width // image width //const int ksize // kernel size //const int pad // pad size //const int stride // stride size //const int height_col // output column height //const int width_col // output column width //float* data_col // outpu data
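// editor's note (not part of the original file): CUDA_KERNEL_LOOP(index, n)
// above comes from the project's common.h; its usual Caffe definition is a
// grid-stride loop, so a fixed-size grid can cover any n.  Expanded by hand
// it is equivalent to the toy kernel below (written here in plain CUDA for
// consistency; the loop is identical under HIP, and the kernel name and body
// are illustrative only).

#include <cuda_runtime.h>

__global__ void scale_example(int n, float a, float* x)
{
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < n;
       index += blockDim.x * gridDim.x)
    x[index] *= a;   // grid-stride body: same traversal as CUDA_KERNEL_LOOP
}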
cbfbd83c8a6c17cf5788d425fda9cd9dd05266c7.cu
#include "common.h" #include "CONV_cuBLAS.cuh" __global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float* data_col) { CUDA_KERNEL_LOOP(index, n) { int w_out = index % width_col; int h_index = index / width_col; int h_out = h_index % height_col; int channel_in = h_index / height_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; float* data_col_ptr = data_col; data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out; const float* data_im_ptr = data_im; data_im_ptr += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ksize; ++i) { for (int j = 0; j < ksize; ++j) { int h = h_in + i; int w = w_in + j; *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ? data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; } } } } //__global__ void im2col_gpu_kernel(const int n, const float* data_im, // const int height, const int width, const int ksize, const int pad, // const int stride, const int height_col, const int width_col, // float* data_col) { // CUDA_KERNEL_LOOP(op_idx, n) { // int index = op_idx; // int w_out = index % width_col; // // index /= width_col; // int h_out = index % height_col; // int channel_in = index / height_col; // int channel_out = channel_in * ksize * ksize; // int h_in = h_out * stride - pad; // int w_in = w_out * stride - pad; // // float* temp_col = data_col+ (channel_out * height_col + h_out) * width_col + w_out; // const float* temp_img = data_im + (channel_in * height + h_in) * width + w_in; // // for (int i = 0; i < ksize; ++i) { // for (int j = 0; j < ksize; ++j) { // int h = h_in + i; // int w = w_in + j; // *temp_col = (h >= 0 && w >= 0 && h < height && w < width) ? // temp_img[i * width + j] : 0; // temp_col += height_col * width_col; // } // } // } //} void im2col_gpu(const float* data_im, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, float* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, height, width, ksize, pad, stride, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } /* // Explicit instantiation template void im2col_gpu<float>(const float* data_im, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, float* data_col); template void im2col_gpu<double>(const double* data_im, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, double* data_col); */ // Helper function for using CUDA to add vectors in parallel. //const float* data_im // raw data, //const int channels // image channels //const int height //image height //const int width // image width //const int ksize // kernel size //const int pad // pad size //const int stride // stride size //const int height_col // output column height //const int width_col // output column width //float* data_col // outpu data
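// editor's sketch (not part of the original file): the kernel above writes
// one row of the column buffer per (channel, ki, kj) filter tap, with the
// output pixels laid out contiguously inside each row.  The single-threaded
// reference below reproduces that exact layout and can be used to check the
// GPU result on small inputs; the function name is an illustrative
// assumption.

void im2col_cpu_ref(const float* data_im, int channels, int height, int width,
                    int ksize, int pad, int stride, float* data_col)
{
  const int height_col = (height + 2 * pad - ksize) / stride + 1;
  const int width_col  = (width  + 2 * pad - ksize) / stride + 1;
  for (int c = 0; c < channels * ksize * ksize; ++c) {
    const int kj   = c % ksize;               // column offset within the filter window
    const int ki   = (c / ksize) % ksize;     // row offset within the filter window
    const int c_im = c / (ksize * ksize);     // source image channel
    for (int h = 0; h < height_col; ++h) {
      for (int w = 0; w < width_col; ++w) {
        const int h_im = h * stride - pad + ki;
        const int w_im = w * stride - pad + kj;
        data_col[(c * height_col + h) * width_col + w] =
            (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width)
                ? data_im[(c_im * height + h_im) * width + w_im]
                : 0.0f;
      }
    }
  }
}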
7830599bd43ef07d5ef1660dc417cb78176a234d.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/reduction/reduction_functions.h" #include <algorithm> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "core/common/common.h" #include "core/providers/cuda/atomic/common.cuh" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "core/providers/cuda/reduction/reduction_utils.cuh" #include "core/providers/cuda/cu_inc/unary_elementwise_impl.cuh" namespace onnxruntime { namespace cuda { namespace detail { constexpr auto MAX_NUM_ELEMENTS_PER_THREAD = 4; constexpr auto MAX_NUM_WARPS_PER_BLOCK = 8; constexpr auto MAX_NUM_BLOCKS_IN_GRID_ROW = 256; constexpr auto MAX_NUM_GRID_ROWS = 32768; dim3 compute_block_dim(int num_cols) { const int x = GPU_WARP_SIZE; const int y = ::min(MAX_NUM_WARPS_PER_BLOCK, ::max(1, num_cols / (MAX_NUM_ELEMENTS_PER_THREAD * x))); return dim3(x, y); } std::pair<dim3, dim3> compute_grid_and_block_dims(int num_rows, int num_cols) { const auto block_dim = compute_block_dim(num_cols); const auto grid_x = std::min<int>( MAX_NUM_BLOCKS_IN_GRID_ROW, std::max<int>(1, num_cols / (MAX_NUM_ELEMENTS_PER_THREAD * block_dim.x * block_dim.y))); const auto grid_y = ::min(MAX_NUM_GRID_ROWS, num_rows); const dim3 grid_dim(grid_x, grid_y); return {grid_dim, block_dim}; } uintptr_t round_up_to_aligned(uintptr_t original, size_t alignment) { assert((alignment & (alignment - 1)) == 0); const size_t alignment_mask = ~(alignment - 1); return (original + alignment - 1) & alignment_mask; } /** * call_reduce_matrix_columns() intermediate buffer layout * * Given buffer element type TBuf, the intermediate buffer layout looks like this: * * ----- * m * num_blocks_per_row * sizeof(TBuf) bytes for block reductions per row * alignment padding bytes as needed * m * sizeof(int) bytes for block done counts per row * ----- */ size_t compute_reduce_matrix_columns_intermediate_buffer_size( int element_size, int num_rows, int num_cols) { ORT_ENFORCE(element_size >= 0 && num_rows >= 0 && num_cols >= 0); const auto grid_dim = compute_grid_and_block_dims(num_rows, num_cols).first; size_t buffer_size{}; // at the beginning, for sizing purposes, assume we are aligned buffer_size += static_cast<size_t>(num_rows) * grid_dim.x * element_size; buffer_size = round_up_to_aligned(buffer_size, alignof(int)); buffer_size += static_cast<size_t>(num_rows) * sizeof(int); // add padding to give us room to align buffer_size += alignof(max_align_t) - 1; return buffer_size; } template <typename TBuf> Status get_reduction_buffers( int num_rows, int num_cols, void* buffer, size_t buffer_size, TBuf*& block_reductions_buffer, int*& block_done_counts_buffer) { const auto grid_dim = compute_grid_and_block_dims(num_rows, num_cols).first; const uintptr_t begin_addr = reinterpret_cast<uintptr_t>(buffer); const uintptr_t block_reductions_addr = round_up_to_aligned(begin_addr, alignof(TBuf)); const uintptr_t block_done_counts_buffer_addr = round_up_to_aligned( block_reductions_addr + static_cast<size_t>(num_rows) * grid_dim.x * sizeof(TBuf), alignof(int)); const uintptr_t end_addr = block_done_counts_buffer_addr + static_cast<size_t>(num_rows) * sizeof(int); const size_t required_size = end_addr - begin_addr; ORT_RETURN_IF_NOT( required_size <= buffer_size, "Buffer size is too small (", buffer_size, " bytes). 
", "At least ", required_size, " bytes are needed from the given base address (", buffer, ")."); block_reductions_buffer = reinterpret_cast<TBuf*>(block_reductions_addr); block_done_counts_buffer = reinterpret_cast<int*>(block_done_counts_buffer_addr); return Status::OK(); } template <typename TIn, typename TOut, typename TBuf, typename TOp, typename TFinalOp, bool DivideResultBySize> __device__ void reduce_all( const int num_elements, const TIn* const input, TOut* const output, TBuf* const block_reductions_buffer, int* const block_done_count_buffer) { extern __shared__ unsigned char shared_memory_bytes[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_bytes); // Thread-level indices: // Linear index of thread in block. const int tid_in_block = threadIdx.y * blockDim.x + threadIdx.x; // Total number of threads in a 2-D block. const int num_threads_in_block = blockDim.x * blockDim.y; // Warp-level indices: // Warp index of thread. const int wid_in_block = tid_in_block / GPU_WARP_SIZE; // Lane index of thread. const int lid_in_block = tid_in_block % GPU_WARP_SIZE; // Warp count per block. const int num_warps_in_block = num_threads_in_block / GPU_WARP_SIZE; // Grid-level indices: // Linear index of block in grid row. const int bid_in_grid_row = blockIdx.x; // Linear index of thread in grid row. const int tid_in_grid_row = bid_in_grid_row * (blockDim.x * blockDim.y) + tid_in_block; // Total number of blocks in a grid row. const int num_blocks_in_grid_row = gridDim.x; // Total number of threads in a grid row with 2-D blocks. const int num_threads_in_grid_row = num_blocks_in_grid_row * num_threads_in_block; const auto write_result = [&output, &num_elements](const TOut result) { // Compilation time if-else branch controlled by template argument can be // optimized out, so there will be no branch in real computation phase. if (DivideResultBySize) { output[0] = TFinalOp()(result / TOut(num_elements)); } else { output[0] = TFinalOp()(result); } }; // Thread-level reduction (storage change: global memory -> register). // One thread reduces MAX_NUM_ELEMENTS_PER_THREAD elements to a thread register // in one iteration. TBuf value = 0; for (int id = tid_in_grid_row; id < num_elements; id += MAX_NUM_ELEMENTS_PER_THREAD * num_threads_in_grid_row) { TIn v[MAX_NUM_ELEMENTS_PER_THREAD]; #pragma unroll for (int i = 0; i < MAX_NUM_ELEMENTS_PER_THREAD; i++) { const int offset = id + i * num_threads_in_grid_row; if (offset < num_elements) { v[i] = input[offset]; } } #pragma unroll for (int i = 0; i < MAX_NUM_ELEMENTS_PER_THREAD; i++) { const int offset = id + i * num_threads_in_grid_row; if (offset < num_elements) { value += TOp()(TBuf(v[i])); } } } #if __CUDA_ARCH__ >= 700 __syncwarp(); #else __syncthreads(); #endif // Warp-level reduction (storage change: register -> register). // The values in a warp will be summed up to a scalar. After warp-level // reduction, each block holds num_warps_in_block values in the shared memory. #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { value += WARP_SHFL_DOWN(value, stride); } // Return early if only one warp is used for reduction. // Given a fixed amount of threads, we prefer threads over warps over blocks so that we never have cases such as // 1. two blocks and each of them has only 1 warp (32 threads). // 2. two warps and each of them has only 2 threads. 
if (num_warps_in_block == 1) { if (tid_in_grid_row == 0) { write_result(value); } return; } if (lid_in_block == 0) { shared_memory[wid_in_block] = value; } __syncthreads(); // Block-level reduction (storage change: shared memory -> global memory). // The values in a block will be summed up to a scalar. // Note that the values are stored in the shared memory. // Here we assume that the size of shared_memory is smaller // than num_warps_in_block, so we just keep halving the number // of threads in each iteration. Our assumption is always true because // the size of shared_memory equals to the number of warps. #pragma unroll for (int stride = MAX_NUM_WARPS_PER_BLOCK / 2; stride > 0; stride /= 2) { if (tid_in_block + stride < num_warps_in_block) { shared_memory[tid_in_block] += shared_memory[tid_in_block + stride]; } __syncthreads(); } // Return early if only one block is used for reduction. if (num_blocks_in_grid_row == 1) { if (tid_in_grid_row == 0) { write_result(shared_memory[0]); } return; } if (tid_in_block == 0) { block_reductions_buffer[bid_in_grid_row] = shared_memory[0]; } __threadfence(); __syncthreads(); // Grid-level reduction. We use the last block to sum up values // stored in the global block_reductions_buffer. __shared__ bool is_last_block_done; if (tid_in_block == 0) { const int count = atomicAdd(block_done_count_buffer, 1); is_last_block_done = (count == (num_blocks_in_grid_row - 1)); } // All threads in each block see if they belong the last active block // (i.e., the value of is_last_block_done). __syncthreads(); // Only the block which saw that count equals to num_blocks_in_grid_row - 1 can // enter the following block. if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_grid_row); for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (tid_in_block < stride && tid_in_block + stride < num_blocks_in_grid_row) { block_reductions_buffer[tid_in_block] += block_reductions_buffer[tid_in_block + stride]; } __syncthreads(); } // The first thread in the last block assigns the final output. 
if (tid_in_block == 0) { write_result(block_reductions_buffer[0]); } } } template <typename TIn, typename TOut, typename TBuf, typename TOp, typename TFinalOp, bool DivideResultBySize> __global__ void reduce_matrix_columns_kernel( const int num_rows, const int num_cols, const TIn* const input, TOut* const output, TBuf* const block_reductions_buffer, int* const block_done_counts_buffer) { const int num_blocks_in_grid_row = gridDim.x; const int row_id_in_grid = blockIdx.y; const int num_grid_rows = gridDim.y; // one row per iteration // row_id is int64_t to avoid int overflow in offset calculations for (int64_t row_id = row_id_in_grid; row_id < num_rows; row_id += num_grid_rows) { const TIn* const row_data = input + row_id * num_cols; TOut* const row_output = output + row_id; TBuf* const row_block_reductions_buffer = block_reductions_buffer + row_id * num_blocks_in_grid_row; int* const row_block_done_counts_buffer = block_done_counts_buffer + row_id; reduce_all<TIn, TOut, TBuf, TOp, TFinalOp, DivideResultBySize>( num_cols, row_data, row_output, row_block_reductions_buffer, row_block_done_counts_buffer); } } template <typename TIn, typename TOut, typename TOp, typename TFinalOp, bool DivideResultBySize> Status call_reduce_matrix_columns( hipStream_t stream, const TIn* input, TOut* output, const int num_rows, const int num_cols, void* buffer, size_t buffer_size) { ORT_ENFORCE(num_rows >= 0 && num_cols >= 0); using TBuf = AccumulationType_t<TIn>; const auto grid_and_block_dims = compute_grid_and_block_dims(num_rows, num_cols); const dim3& grid_dim = grid_and_block_dims.first; const dim3& block_dim = grid_and_block_dims.second; TBuf* block_reductions_buffer; int* block_done_counts_buffer; ORT_RETURN_IF_ERROR(get_reduction_buffers( num_rows, num_cols, buffer, buffer_size, block_reductions_buffer, block_done_counts_buffer)); // If more than one block is used per grid row, then inter-block reduction is needed. 
if (grid_dim.x > 1) { CUDA_RETURN_IF_ERROR(hipMemsetAsync(block_done_counts_buffer, 0, num_rows * sizeof(int), stream)); } const int shared_mem_size = sizeof(TBuf) * block_dim.x * block_dim.y / GPU_WARP_SIZE; hipLaunchKernelGGL(( reduce_matrix_columns_kernel<TIn, TOut, TBuf, TOp, TFinalOp, DivideResultBySize>) , dim3(grid_dim), dim3(block_dim), shared_mem_size, stream, num_rows, num_cols, input, output, block_reductions_buffer, block_done_counts_buffer); return Status::OK(); } } // namespace detail template <typename TIn, typename TOut> Status reduce_sum( hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Identity, Identity, false>( stream, input, output, 1, size, buffer, buffer_size); } template <typename TIn, typename TOut> Status reduce_square_sum( hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Square, Identity, false>( stream, input, output, 1, size, buffer, buffer_size); } template <typename TIn, typename TOut> Status reduce_l2_norm( hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Square, Sqrt, false>( stream, input, output, 1, size, buffer, buffer_size); } template <typename TIn, typename TOut> Status reduce_mean( hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Identity, Identity, true>( stream, input, output, 1, size, buffer, buffer_size); } #define INSTANTIATE_REDUCE_SUM(TIn, TOut) \ template Status reduce_sum<TIn, TOut>(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_SUM(half, half); INSTANTIATE_REDUCE_SUM(half, float); INSTANTIATE_REDUCE_SUM(float, float); INSTANTIATE_REDUCE_SUM(double, double); #undef INSTANTIATE_REDUCE_SUM #define INSTANTIATE_REDUCE_SQUARE_SUM(TIn, TOut) \ template Status reduce_square_sum<TIn, TOut>(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_SQUARE_SUM(half, float); INSTANTIATE_REDUCE_SQUARE_SUM(float, float); INSTANTIATE_REDUCE_SQUARE_SUM(double, double); #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_REDUCE_SQUARE_SUM(nv_bfloat16, float); #endif #undef INSTANTIATE_REDUCE_SQUARE_SUM #define INSTANTIATE_REDUCE_L2_NORM(TIn, TOut) \ template Status reduce_l2_norm<TIn, TOut>(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_L2_NORM(half, float); INSTANTIATE_REDUCE_L2_NORM(float, float); INSTANTIATE_REDUCE_L2_NORM(double, double); #undef INSTANTIATE_REDUCE_L2_NORM #define INSTANTIATE_REDUCE_MEAN(TIn, TOut) \ template Status reduce_mean<TIn, TOut>(hipStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_MEAN(half, float); INSTANTIATE_REDUCE_MEAN(float, float); INSTANTIATE_REDUCE_MEAN(double, double); #undef INSTANTIATE_REDUCE_MEAN namespace detail { template <typename TIn, typename TOut, typename TBuf> __global__ void reduce_matrix_rows_kernel(const TIn* input, TOut* output, int m, int n) { constexpr int x_load_count_per_thread = 1; constexpr int y_load_count_per_thread = 4; const int t_count_x_in_grid = blockDim.x * gridDim.x; const int t_count_y_in_grid = 
blockDim.y * gridDim.y; const int x_grid_stride = t_count_x_in_grid * x_load_count_per_thread; const int y_grid_stride = t_count_y_in_grid * y_load_count_per_thread; const int tid_x_in_grid = threadIdx.x + blockDim.x * blockIdx.x; const int tid_y_in_grid = threadIdx.y + blockDim.y * blockIdx.y; const int tid_in_block = threadIdx.x + blockDim.x * threadIdx.y; // Shape is blockDim.y-by-blockDim.x and element type is TBuf. extern __shared__ unsigned char shared_memory_bytes[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_bytes); // to prevent int overflow in index calculation for input size m*n const int64_t n_int64 = static_cast<int64_t>(n); for (int col = tid_x_in_grid; col < n; col += x_grid_stride) { shared_memory[tid_in_block] = TBuf(0.0f); TBuf sum = TBuf(0.0f); // This loops load multiple blockDim.y-by-blockDim.x sub-tensors from the input. for (int row = tid_y_in_grid; row < m; row += y_grid_stride) { // Thread-level reduction. Each thread loads y_load_count_per_thread values // and aggregrate them. #pragma unroll(y_load_count_per_thread) for (int row_inner = 0; row_inner < y_load_count_per_thread; ++row_inner) { int row_final = row + row_inner * t_count_y_in_grid; int col_final = col; if (row_final < m && col_final < n) { sum += TBuf(input[row_final * n_int64 + col_final]); } } } // Write thread-level reduction result into shared memory. shared_memory[tid_in_block] = sum; // Wait all threads to finish their thread-level reductions. __syncthreads(); // This loop conducts reduction on elements stored in shared memory. // Each block reduces blockDim.y-by-blockDim.x tensor to 1-by-blockDim.x tensor. #pragma unroll(4) for (int stride = blockDim.y / 2; stride > 0; stride /= 2) { if (threadIdx.y < stride) { shared_memory[tid_in_block] += shared_memory[tid_in_block + stride * blockDim.x]; } __syncthreads(); } if (threadIdx.y == 0) { atomic_add(output + col, TOut(shared_memory[threadIdx.x])); } } } template <typename TIn, typename TOut, typename TBuf> Status call_reduce_matrix_rows(hipStream_t stream, const TIn* input, TOut* output, int m, int n, bool reset_initial_output) { ORT_ENFORCE(m >= 0 && n >= 0); if (reset_initial_output) { CUDA_RETURN_IF_ERROR(hipMemsetAsync(output, 0, n * sizeof(TOut), stream)); } constexpr int max_num_threads_in_block = 512; constexpr int max_num_blocks_in_grid = 512; constexpr int load_count_per_thread = 4; const int block_x_dim = least_pow2_bound(::max(1, ::min(n, GPU_WARP_SIZE))); const int block_y_dim = least_pow2_bound(::max(1, ::min(max_num_threads_in_block / block_x_dim, m / load_count_per_thread))); const int grid_x_dim = ::max(1, ::min(n / block_x_dim, max_num_blocks_in_grid)); const int grid_y_dim = ::max(1, ::min(max_num_blocks_in_grid / grid_x_dim, m / block_y_dim / 4)); const dim3 grid(grid_x_dim, grid_y_dim, 1); const dim3 block(block_x_dim, block_y_dim, 1); hipLaunchKernelGGL(( reduce_matrix_rows_kernel<TIn, TOut, TBuf>), dim3(grid), dim3(block), block.y * block.x * sizeof(TBuf), stream, input, output, m, n); return Status::OK(); } } // namespace detail template <typename T> struct OP_Div { __device__ __inline__ T operator()(const T& a) const { return a / v_; } OP_Div(T v) : v_(v) {} T v_; }; template <typename T> void UnaryDiv(hipStream_t stream, const T* input, T* output, T denominator, size_t count) { UnaryElementWiseImpl(stream, input, output, OP_Div<T>(denominator), count); } #define INSTANTIATE_UNARY_DIV(T) \ template void UnaryDiv<T>(hipStream_t stream, const T* input, T* output, T denominator, size_t count) 
INSTANTIATE_UNARY_DIV(half); INSTANTIATE_UNARY_DIV(float); INSTANTIATE_UNARY_DIV(double); #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_UNARY_DIV(nv_bfloat16); #endif #undef INSTANTIATE_UNARY_DIV template <typename TIn, typename TOut> Status reduce_matrix_rows(hipStream_t stream, const TIn* input, TOut* output, int m, int n, bool reset_initial_output) { using TBuf = AccumulationType_t<TIn>; return detail::call_reduce_matrix_rows<TIn, TOut, TBuf>(stream, input, output, m, n, reset_initial_output); } #define INSTANTIATE_REDUCE_MATRIX_ROWS(T) \ template Status reduce_matrix_rows<T, T>(hipStream_t stream, const T* input, T* output, int m, int n, bool reset_initial_output) INSTANTIATE_REDUCE_MATRIX_ROWS(half); INSTANTIATE_REDUCE_MATRIX_ROWS(float); INSTANTIATE_REDUCE_MATRIX_ROWS(double); #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_REDUCE_MATRIX_ROWS(nv_bfloat16); #endif #undef INSTANTIATE_REDUCE_MATRIX_ROWS template <typename TIn, typename TOut> Status reduce_matrix_columns(hipStream_t stream, const TIn* input, TOut* output, int m, int n, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Identity, Identity, false>( stream, input, output, m, n, buffer, buffer_size); } #define INSTANTIATE_REDUCE_MATRIX_COLUMNS(T) \ template Status reduce_matrix_columns<T, T>(hipStream_t stream, const T* input, T* output, int m, int n, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_MATRIX_COLUMNS(half); INSTANTIATE_REDUCE_MATRIX_COLUMNS(float); INSTANTIATE_REDUCE_MATRIX_COLUMNS(double); #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_REDUCE_MATRIX_COLUMNS(nv_bfloat16); #endif #undef INSTANTIATE_REDUCE_MATRIX_COLUMNS } // namespace cuda } // namespace onnxruntime
7830599bd43ef07d5ef1660dc417cb78176a234d.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/reduction/reduction_functions.h" #include <algorithm> #include <cuda.h> #include <cuda_fp16.h> #include "core/common/common.h" #include "core/providers/cuda/atomic/common.cuh" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/cuda_utils.h" #include "core/providers/cuda/reduction/reduction_utils.cuh" #include "core/providers/cuda/cu_inc/unary_elementwise_impl.cuh" namespace onnxruntime { namespace cuda { namespace detail { constexpr auto MAX_NUM_ELEMENTS_PER_THREAD = 4; constexpr auto MAX_NUM_WARPS_PER_BLOCK = 8; constexpr auto MAX_NUM_BLOCKS_IN_GRID_ROW = 256; constexpr auto MAX_NUM_GRID_ROWS = 32768; dim3 compute_block_dim(int num_cols) { const int x = GPU_WARP_SIZE; const int y = std::min(MAX_NUM_WARPS_PER_BLOCK, std::max(1, num_cols / (MAX_NUM_ELEMENTS_PER_THREAD * x))); return dim3(x, y); } std::pair<dim3, dim3> compute_grid_and_block_dims(int num_rows, int num_cols) { const auto block_dim = compute_block_dim(num_cols); const auto grid_x = std::min<int>( MAX_NUM_BLOCKS_IN_GRID_ROW, std::max<int>(1, num_cols / (MAX_NUM_ELEMENTS_PER_THREAD * block_dim.x * block_dim.y))); const auto grid_y = std::min(MAX_NUM_GRID_ROWS, num_rows); const dim3 grid_dim(grid_x, grid_y); return {grid_dim, block_dim}; } uintptr_t round_up_to_aligned(uintptr_t original, size_t alignment) { assert((alignment & (alignment - 1)) == 0); const size_t alignment_mask = ~(alignment - 1); return (original + alignment - 1) & alignment_mask; } /** * call_reduce_matrix_columns() intermediate buffer layout * * Given buffer element type TBuf, the intermediate buffer layout looks like this: * * ----- * m * num_blocks_per_row * sizeof(TBuf) bytes for block reductions per row * alignment padding bytes as needed * m * sizeof(int) bytes for block done counts per row * ----- */ size_t compute_reduce_matrix_columns_intermediate_buffer_size( int element_size, int num_rows, int num_cols) { ORT_ENFORCE(element_size >= 0 && num_rows >= 0 && num_cols >= 0); const auto grid_dim = compute_grid_and_block_dims(num_rows, num_cols).first; size_t buffer_size{}; // at the beginning, for sizing purposes, assume we are aligned buffer_size += static_cast<size_t>(num_rows) * grid_dim.x * element_size; buffer_size = round_up_to_aligned(buffer_size, alignof(int)); buffer_size += static_cast<size_t>(num_rows) * sizeof(int); // add padding to give us room to align buffer_size += alignof(max_align_t) - 1; return buffer_size; } template <typename TBuf> Status get_reduction_buffers( int num_rows, int num_cols, void* buffer, size_t buffer_size, TBuf*& block_reductions_buffer, int*& block_done_counts_buffer) { const auto grid_dim = compute_grid_and_block_dims(num_rows, num_cols).first; const uintptr_t begin_addr = reinterpret_cast<uintptr_t>(buffer); const uintptr_t block_reductions_addr = round_up_to_aligned(begin_addr, alignof(TBuf)); const uintptr_t block_done_counts_buffer_addr = round_up_to_aligned( block_reductions_addr + static_cast<size_t>(num_rows) * grid_dim.x * sizeof(TBuf), alignof(int)); const uintptr_t end_addr = block_done_counts_buffer_addr + static_cast<size_t>(num_rows) * sizeof(int); const size_t required_size = end_addr - begin_addr; ORT_RETURN_IF_NOT( required_size <= buffer_size, "Buffer size is too small (", buffer_size, " bytes). 
", "At least ", required_size, " bytes are needed from the given base address (", buffer, ")."); block_reductions_buffer = reinterpret_cast<TBuf*>(block_reductions_addr); block_done_counts_buffer = reinterpret_cast<int*>(block_done_counts_buffer_addr); return Status::OK(); } template <typename TIn, typename TOut, typename TBuf, typename TOp, typename TFinalOp, bool DivideResultBySize> __device__ void reduce_all( const int num_elements, const TIn* const input, TOut* const output, TBuf* const block_reductions_buffer, int* const block_done_count_buffer) { extern __shared__ unsigned char shared_memory_bytes[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_bytes); // Thread-level indices: // Linear index of thread in block. const int tid_in_block = threadIdx.y * blockDim.x + threadIdx.x; // Total number of threads in a 2-D block. const int num_threads_in_block = blockDim.x * blockDim.y; // Warp-level indices: // Warp index of thread. const int wid_in_block = tid_in_block / GPU_WARP_SIZE; // Lane index of thread. const int lid_in_block = tid_in_block % GPU_WARP_SIZE; // Warp count per block. const int num_warps_in_block = num_threads_in_block / GPU_WARP_SIZE; // Grid-level indices: // Linear index of block in grid row. const int bid_in_grid_row = blockIdx.x; // Linear index of thread in grid row. const int tid_in_grid_row = bid_in_grid_row * (blockDim.x * blockDim.y) + tid_in_block; // Total number of blocks in a grid row. const int num_blocks_in_grid_row = gridDim.x; // Total number of threads in a grid row with 2-D blocks. const int num_threads_in_grid_row = num_blocks_in_grid_row * num_threads_in_block; const auto write_result = [&output, &num_elements](const TOut result) { // Compilation time if-else branch controlled by template argument can be // optimized out, so there will be no branch in real computation phase. if (DivideResultBySize) { output[0] = TFinalOp()(result / TOut(num_elements)); } else { output[0] = TFinalOp()(result); } }; // Thread-level reduction (storage change: global memory -> register). // One thread reduces MAX_NUM_ELEMENTS_PER_THREAD elements to a thread register // in one iteration. TBuf value = 0; for (int id = tid_in_grid_row; id < num_elements; id += MAX_NUM_ELEMENTS_PER_THREAD * num_threads_in_grid_row) { TIn v[MAX_NUM_ELEMENTS_PER_THREAD]; #pragma unroll for (int i = 0; i < MAX_NUM_ELEMENTS_PER_THREAD; i++) { const int offset = id + i * num_threads_in_grid_row; if (offset < num_elements) { v[i] = input[offset]; } } #pragma unroll for (int i = 0; i < MAX_NUM_ELEMENTS_PER_THREAD; i++) { const int offset = id + i * num_threads_in_grid_row; if (offset < num_elements) { value += TOp()(TBuf(v[i])); } } } #if __CUDA_ARCH__ >= 700 __syncwarp(); #else __syncthreads(); #endif // Warp-level reduction (storage change: register -> register). // The values in a warp will be summed up to a scalar. After warp-level // reduction, each block holds num_warps_in_block values in the shared memory. #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { value += WARP_SHFL_DOWN(value, stride); } // Return early if only one warp is used for reduction. // Given a fixed amount of threads, we prefer threads over warps over blocks so that we never have cases such as // 1. two blocks and each of them has only 1 warp (32 threads). // 2. two warps and each of them has only 2 threads. 
if (num_warps_in_block == 1) { if (tid_in_grid_row == 0) { write_result(value); } return; } if (lid_in_block == 0) { shared_memory[wid_in_block] = value; } __syncthreads(); // Block-level reduction (storage change: shared memory -> global memory). // The values in a block will be summed up to a scalar. // Note that the values are stored in the shared memory. // Here we assume that the size of shared_memory is smaller // than num_warps_in_block, so we just keep halving the number // of threads in each iteration. Our assumption is always true because // the size of shared_memory equals to the number of warps. #pragma unroll for (int stride = MAX_NUM_WARPS_PER_BLOCK / 2; stride > 0; stride /= 2) { if (tid_in_block + stride < num_warps_in_block) { shared_memory[tid_in_block] += shared_memory[tid_in_block + stride]; } __syncthreads(); } // Return early if only one block is used for reduction. if (num_blocks_in_grid_row == 1) { if (tid_in_grid_row == 0) { write_result(shared_memory[0]); } return; } if (tid_in_block == 0) { block_reductions_buffer[bid_in_grid_row] = shared_memory[0]; } __threadfence(); __syncthreads(); // Grid-level reduction. We use the last block to sum up values // stored in the global block_reductions_buffer. __shared__ bool is_last_block_done; if (tid_in_block == 0) { const int count = atomicAdd(block_done_count_buffer, 1); is_last_block_done = (count == (num_blocks_in_grid_row - 1)); } // All threads in each block see if they belong the last active block // (i.e., the value of is_last_block_done). __syncthreads(); // Only the block which saw that count equals to num_blocks_in_grid_row - 1 can // enter the following block. if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_grid_row); for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (tid_in_block < stride && tid_in_block + stride < num_blocks_in_grid_row) { block_reductions_buffer[tid_in_block] += block_reductions_buffer[tid_in_block + stride]; } __syncthreads(); } // The first thread in the last block assigns the final output. 
if (tid_in_block == 0) { write_result(block_reductions_buffer[0]); } } } template <typename TIn, typename TOut, typename TBuf, typename TOp, typename TFinalOp, bool DivideResultBySize> __global__ void reduce_matrix_columns_kernel( const int num_rows, const int num_cols, const TIn* const input, TOut* const output, TBuf* const block_reductions_buffer, int* const block_done_counts_buffer) { const int num_blocks_in_grid_row = gridDim.x; const int row_id_in_grid = blockIdx.y; const int num_grid_rows = gridDim.y; // one row per iteration // row_id is int64_t to avoid int overflow in offset calculations for (int64_t row_id = row_id_in_grid; row_id < num_rows; row_id += num_grid_rows) { const TIn* const row_data = input + row_id * num_cols; TOut* const row_output = output + row_id; TBuf* const row_block_reductions_buffer = block_reductions_buffer + row_id * num_blocks_in_grid_row; int* const row_block_done_counts_buffer = block_done_counts_buffer + row_id; reduce_all<TIn, TOut, TBuf, TOp, TFinalOp, DivideResultBySize>( num_cols, row_data, row_output, row_block_reductions_buffer, row_block_done_counts_buffer); } } template <typename TIn, typename TOut, typename TOp, typename TFinalOp, bool DivideResultBySize> Status call_reduce_matrix_columns( cudaStream_t stream, const TIn* input, TOut* output, const int num_rows, const int num_cols, void* buffer, size_t buffer_size) { ORT_ENFORCE(num_rows >= 0 && num_cols >= 0); using TBuf = AccumulationType_t<TIn>; const auto grid_and_block_dims = compute_grid_and_block_dims(num_rows, num_cols); const dim3& grid_dim = grid_and_block_dims.first; const dim3& block_dim = grid_and_block_dims.second; TBuf* block_reductions_buffer; int* block_done_counts_buffer; ORT_RETURN_IF_ERROR(get_reduction_buffers( num_rows, num_cols, buffer, buffer_size, block_reductions_buffer, block_done_counts_buffer)); // If more than one block is used per grid row, then inter-block reduction is needed. 
if (grid_dim.x > 1) { CUDA_RETURN_IF_ERROR(cudaMemsetAsync(block_done_counts_buffer, 0, num_rows * sizeof(int), stream)); } const int shared_mem_size = sizeof(TBuf) * block_dim.x * block_dim.y / GPU_WARP_SIZE; reduce_matrix_columns_kernel<TIn, TOut, TBuf, TOp, TFinalOp, DivideResultBySize> <<<grid_dim, block_dim, shared_mem_size, stream>>>( num_rows, num_cols, input, output, block_reductions_buffer, block_done_counts_buffer); return Status::OK(); } } // namespace detail template <typename TIn, typename TOut> Status reduce_sum( cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Identity, Identity, false>( stream, input, output, 1, size, buffer, buffer_size); } template <typename TIn, typename TOut> Status reduce_square_sum( cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Square, Identity, false>( stream, input, output, 1, size, buffer, buffer_size); } template <typename TIn, typename TOut> Status reduce_l2_norm( cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Square, Sqrt, false>( stream, input, output, 1, size, buffer, buffer_size); } template <typename TIn, typename TOut> Status reduce_mean( cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Identity, Identity, true>( stream, input, output, 1, size, buffer, buffer_size); } #define INSTANTIATE_REDUCE_SUM(TIn, TOut) \ template Status reduce_sum<TIn, TOut>(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_SUM(half, half); INSTANTIATE_REDUCE_SUM(half, float); INSTANTIATE_REDUCE_SUM(float, float); INSTANTIATE_REDUCE_SUM(double, double); #undef INSTANTIATE_REDUCE_SUM #define INSTANTIATE_REDUCE_SQUARE_SUM(TIn, TOut) \ template Status reduce_square_sum<TIn, TOut>(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_SQUARE_SUM(half, float); INSTANTIATE_REDUCE_SQUARE_SUM(float, float); INSTANTIATE_REDUCE_SQUARE_SUM(double, double); #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_REDUCE_SQUARE_SUM(nv_bfloat16, float); #endif #undef INSTANTIATE_REDUCE_SQUARE_SUM #define INSTANTIATE_REDUCE_L2_NORM(TIn, TOut) \ template Status reduce_l2_norm<TIn, TOut>(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_L2_NORM(half, float); INSTANTIATE_REDUCE_L2_NORM(float, float); INSTANTIATE_REDUCE_L2_NORM(double, double); #undef INSTANTIATE_REDUCE_L2_NORM #define INSTANTIATE_REDUCE_MEAN(TIn, TOut) \ template Status reduce_mean<TIn, TOut>(cudaStream_t stream, const TIn* input, TOut* output, int size, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_MEAN(half, float); INSTANTIATE_REDUCE_MEAN(float, float); INSTANTIATE_REDUCE_MEAN(double, double); #undef INSTANTIATE_REDUCE_MEAN namespace detail { template <typename TIn, typename TOut, typename TBuf> __global__ void reduce_matrix_rows_kernel(const TIn* input, TOut* output, int m, int n) { constexpr int x_load_count_per_thread = 1; constexpr int y_load_count_per_thread = 4; const int t_count_x_in_grid = blockDim.x * gridDim.x; const int t_count_y_in_grid = blockDim.y * gridDim.y; 
const int x_grid_stride = t_count_x_in_grid * x_load_count_per_thread; const int y_grid_stride = t_count_y_in_grid * y_load_count_per_thread; const int tid_x_in_grid = threadIdx.x + blockDim.x * blockIdx.x; const int tid_y_in_grid = threadIdx.y + blockDim.y * blockIdx.y; const int tid_in_block = threadIdx.x + blockDim.x * threadIdx.y; // Shape is blockDim.y-by-blockDim.x and element type is TBuf. extern __shared__ unsigned char shared_memory_bytes[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_bytes); // to prevent int overflow in index calculation for input size m*n const int64_t n_int64 = static_cast<int64_t>(n); for (int col = tid_x_in_grid; col < n; col += x_grid_stride) { shared_memory[tid_in_block] = TBuf(0.0f); TBuf sum = TBuf(0.0f); // This loops load multiple blockDim.y-by-blockDim.x sub-tensors from the input. for (int row = tid_y_in_grid; row < m; row += y_grid_stride) { // Thread-level reduction. Each thread loads y_load_count_per_thread values // and aggregrate them. #pragma unroll(y_load_count_per_thread) for (int row_inner = 0; row_inner < y_load_count_per_thread; ++row_inner) { int row_final = row + row_inner * t_count_y_in_grid; int col_final = col; if (row_final < m && col_final < n) { sum += TBuf(input[row_final * n_int64 + col_final]); } } } // Write thread-level reduction result into shared memory. shared_memory[tid_in_block] = sum; // Wait all threads to finish their thread-level reductions. __syncthreads(); // This loop conducts reduction on elements stored in shared memory. // Each block reduces blockDim.y-by-blockDim.x tensor to 1-by-blockDim.x tensor. #pragma unroll(4) for (int stride = blockDim.y / 2; stride > 0; stride /= 2) { if (threadIdx.y < stride) { shared_memory[tid_in_block] += shared_memory[tid_in_block + stride * blockDim.x]; } __syncthreads(); } if (threadIdx.y == 0) { atomic_add(output + col, TOut(shared_memory[threadIdx.x])); } } } template <typename TIn, typename TOut, typename TBuf> Status call_reduce_matrix_rows(cudaStream_t stream, const TIn* input, TOut* output, int m, int n, bool reset_initial_output) { ORT_ENFORCE(m >= 0 && n >= 0); if (reset_initial_output) { CUDA_RETURN_IF_ERROR(cudaMemsetAsync(output, 0, n * sizeof(TOut), stream)); } constexpr int max_num_threads_in_block = 512; constexpr int max_num_blocks_in_grid = 512; constexpr int load_count_per_thread = 4; const int block_x_dim = least_pow2_bound(std::max(1, std::min(n, GPU_WARP_SIZE))); const int block_y_dim = least_pow2_bound(std::max(1, std::min(max_num_threads_in_block / block_x_dim, m / load_count_per_thread))); const int grid_x_dim = std::max(1, std::min(n / block_x_dim, max_num_blocks_in_grid)); const int grid_y_dim = std::max(1, std::min(max_num_blocks_in_grid / grid_x_dim, m / block_y_dim / 4)); const dim3 grid(grid_x_dim, grid_y_dim, 1); const dim3 block(block_x_dim, block_y_dim, 1); reduce_matrix_rows_kernel<TIn, TOut, TBuf><<<grid, block, block.y * block.x * sizeof(TBuf), stream>>>( input, output, m, n); return Status::OK(); } } // namespace detail template <typename T> struct OP_Div { __device__ __inline__ T operator()(const T& a) const { return a / v_; } OP_Div(T v) : v_(v) {} T v_; }; template <typename T> void UnaryDiv(cudaStream_t stream, const T* input, T* output, T denominator, size_t count) { UnaryElementWiseImpl(stream, input, output, OP_Div<T>(denominator), count); } #define INSTANTIATE_UNARY_DIV(T) \ template void UnaryDiv<T>(cudaStream_t stream, const T* input, T* output, T denominator, size_t count) INSTANTIATE_UNARY_DIV(half); 
INSTANTIATE_UNARY_DIV(float); INSTANTIATE_UNARY_DIV(double); #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_UNARY_DIV(nv_bfloat16); #endif #undef INSTANTIATE_UNARY_DIV template <typename TIn, typename TOut> Status reduce_matrix_rows(cudaStream_t stream, const TIn* input, TOut* output, int m, int n, bool reset_initial_output) { using TBuf = AccumulationType_t<TIn>; return detail::call_reduce_matrix_rows<TIn, TOut, TBuf>(stream, input, output, m, n, reset_initial_output); } #define INSTANTIATE_REDUCE_MATRIX_ROWS(T) \ template Status reduce_matrix_rows<T, T>(cudaStream_t stream, const T* input, T* output, int m, int n, bool reset_initial_output) INSTANTIATE_REDUCE_MATRIX_ROWS(half); INSTANTIATE_REDUCE_MATRIX_ROWS(float); INSTANTIATE_REDUCE_MATRIX_ROWS(double); #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_REDUCE_MATRIX_ROWS(nv_bfloat16); #endif #undef INSTANTIATE_REDUCE_MATRIX_ROWS template <typename TIn, typename TOut> Status reduce_matrix_columns(cudaStream_t stream, const TIn* input, TOut* output, int m, int n, void* buffer, size_t buffer_size) { return detail::call_reduce_matrix_columns<TIn, TOut, Identity, Identity, false>( stream, input, output, m, n, buffer, buffer_size); } #define INSTANTIATE_REDUCE_MATRIX_COLUMNS(T) \ template Status reduce_matrix_columns<T, T>(cudaStream_t stream, const T* input, T* output, int m, int n, void* buffer, size_t buffer_size) INSTANTIATE_REDUCE_MATRIX_COLUMNS(half); INSTANTIATE_REDUCE_MATRIX_COLUMNS(float); INSTANTIATE_REDUCE_MATRIX_COLUMNS(double); #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_REDUCE_MATRIX_COLUMNS(nv_bfloat16); #endif #undef INSTANTIATE_REDUCE_MATRIX_COLUMNS } // namespace cuda } // namespace onnxruntime
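reduce_matrix_rows() above accumulates an m-by-n row-major matrix into n column totals via atomic adds, which is why the output can be cleared first through reset_initial_output. A short hedged sketch of a caller (the function name and shapes are illustrative, not part of the original file):

// Hypothetical caller: reduce a [batch_rows x features] matrix to per-column
// totals, as a bias-gradient computation might.
namespace onnxruntime {
namespace cuda {

Status sum_over_rows_example(cudaStream_t stream, const float* d_matrix, float* d_column_sums,
                             int batch_rows, int features) {
  // reset_initial_output = true zeroes d_column_sums before the kernel's
  // atomic accumulation; pass false to keep accumulating into existing values.
  return reduce_matrix_rows<float, float>(stream, d_matrix, d_column_sums,
                                          batch_rows, features, /*reset_initial_output*/ true);
}

}  // namespace cuda
}  // namespace onnxruntime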
f75dcff205e066b668cee93faccea7327286a9ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Vector addition: C = A + B. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> //#include "cutil.h" // includes, kernels #include "vectoradd_kernel.cu" #define MAXLINE 100000 //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int); Vector AllocateDeviceVector(const Vector V); Vector AllocateVector(int size, int init); void CopyToDeviceVector(Vector Vdevice, const Vector Vhost); void CopyFromDeviceVector(Vector Vhost, const Vector Vdevice); int ReadFile(Vector* V, char* file_name); void WriteFile(Vector V, char* file_name); void VectorAddOnDevice(const Vector A, const Vector B, Vector C); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { // Vectors for the program Vector A; Vector B; Vector C; // Number of elements in the vectors unsigned int size_elements = VSIZE; int errorA = 0, errorB = 0; srand(2012); // Check command line for input vector files if(argc != 3 && argc != 4) { // No inputs provided // Allocate and initialize the vectors A = AllocateVector(VSIZE, 1); B = AllocateVector(VSIZE, 1); C = AllocateVector(VSIZE, 0); } else { // Inputs provided // Allocate and read source vectors from disk A = AllocateVector(VSIZE, 0); B = AllocateVector(VSIZE, 0); C = AllocateVector(VSIZE, 0); errorA = ReadFile(&A, argv[1]); errorB = ReadFile(&B, argv[2]); // check for read errors if(errorA != size_elements || errorB != size_elements) { printf("Error reading input files %d, %d\n", errorA, errorB); return 1; } } // A + B on the device VectorAddOnDevice(A, B, C); // compute the vector addition on the CPU for comparison Vector reference = AllocateVector(VSIZE, 0); computeGold(reference.elements, A.elements, B.elements, VSIZE); // check if the device result is equivalent to the expected solution //CUTBoolean res = cutComparefe(reference.elements, C.elements, // size_elements, 0.0001f); unsigned res = 1; for (unsigned i = 0; i < size_elements; i++) if (abs(reference.elements[i] - C.elements[i]) > 0.0001f) res = 0; printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); // output result if output file is requested if(argc == 4) { WriteFile(C, argv[3]); } else if(argc == 2) { WriteFile(C, argv[1]); } // Free host matrices free(A.elements); A.elements = NULL; free(B.elements); B.elements = NULL; free(C.elements); C.elements = NULL; return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void VectorAddOnDevice(const Vector A, const Vector B, Vector C) { //Interface host call to the device kernel code and invoke the kernel //* steps: //* 1. allocate device vectors d_A, d_B and d_C with length same as input vectors Vector d_A = AllocateDeviceVector(A); Vector d_B = AllocateDeviceVector(B); Vector d_C = AllocateDeviceVector(C); //* 2. copy A to d_A, B to d_B CopyToDeviceVector(d_A,A); CopyToDeviceVector(d_B,B); //* 3. launch kernel to compute d_C = d_A + d_B hipLaunchKernelGGL(( VectorAddKernel), dim3(ceil(VSIZE/256.0)), dim3(VSIZE), 0, 0, d_A,d_B,d_C); //* 4. copy d_C back to host vector C CopyFromDeviceVector(C,d_C); //* 5. 
free device vectors d_A, d_B, d_C hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } // Allocate a device vector of same size as V. Vector AllocateDeviceVector(const Vector V) { Vector Vdevice = V; int size = V.length * sizeof(float); hipError_t cuda_ret = hipMalloc((void**)&Vdevice.elements, size); if(cuda_ret != hipSuccess) { printf("Unable to allocate device memory"); exit(0); } return Vdevice; } // Allocate a vector of dimensions length // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. Vector AllocateVector(int length, int init) { Vector V; V.length = length; V.elements = NULL; V.elements = (float*) malloc(length*sizeof(float)); for(unsigned int i = 0; i < V.length; i++) { V.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX); } return V; } // Copy a host vector to a device vector. void CopyToDeviceVector(Vector Vdevice, const Vector Vhost) { int size = Vhost.length * sizeof(float); Vdevice.length = Vhost.length; hipMemcpy(Vdevice.elements, Vhost.elements, size, hipMemcpyHostToDevice); } // Copy a device vector to a host vector. void CopyFromDeviceVector(Vector Vhost, const Vector Vdevice) { int size = Vdevice.length * sizeof(float); hipMemcpy(Vhost.elements, Vdevice.elements, size, hipMemcpyDeviceToHost); } // Read a floating point vector in from file int ReadFile(Vector* V, char* file_name) { unsigned int data_read = VSIZE; FILE* input = fopen(file_name, "r"); char vector_string[MAXLINE]; fgets(vector_string, MAXLINE, input); char* part = strtok(vector_string, " "); for (unsigned i = 0; i < VSIZE; i++) { V->elements[i] = atof(part); part = strtok(NULL, " "); } fclose(input); return data_read; } // Write a floating point vector to file void WriteFile(Vector V, char* file_name) { FILE* output = fopen(file_name, "w"); for (unsigned i = 0; i < VSIZE; i++) { fprintf(output, "%f ", V.elements[i]); } fclose(output); }
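The kernel itself lives in vectoradd_kernel.cu, which also defines Vector and VSIZE and is not part of this pair, so its body is not shown. A sketch of an element-wise kernel consistent with the launch in VectorAddOnDevice — an assumption about that file, not its actual contents — would be:

// Assumed shape of the kernel declared in vectoradd_kernel.cu. The bounds
// check matters because the grid size is rounded up; note also that the
// launch above uses VSIZE threads per block, which is only valid while VSIZE
// stays within the device limit (typically 1024).
__global__ void VectorAddKernel(Vector A, Vector B, Vector C) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < C.length) {
    C.elements[i] = A.elements[i] + B.elements[i];
  }
}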
f75dcff205e066b668cee93faccea7327286a9ff.cu
/* Vector addition: C = A + B. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> //#include "cutil.h" // includes, kernels #include "vectoradd_kernel.cu" #define MAXLINE 100000 //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int); Vector AllocateDeviceVector(const Vector V); Vector AllocateVector(int size, int init); void CopyToDeviceVector(Vector Vdevice, const Vector Vhost); void CopyFromDeviceVector(Vector Vhost, const Vector Vdevice); int ReadFile(Vector* V, char* file_name); void WriteFile(Vector V, char* file_name); void VectorAddOnDevice(const Vector A, const Vector B, Vector C); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { // Vectors for the program Vector A; Vector B; Vector C; // Number of elements in the vectors unsigned int size_elements = VSIZE; int errorA = 0, errorB = 0; srand(2012); // Check command line for input vector files if(argc != 3 && argc != 4) { // No inputs provided // Allocate and initialize the vectors A = AllocateVector(VSIZE, 1); B = AllocateVector(VSIZE, 1); C = AllocateVector(VSIZE, 0); } else { // Inputs provided // Allocate and read source vectors from disk A = AllocateVector(VSIZE, 0); B = AllocateVector(VSIZE, 0); C = AllocateVector(VSIZE, 0); errorA = ReadFile(&A, argv[1]); errorB = ReadFile(&B, argv[2]); // check for read errors if(errorA != size_elements || errorB != size_elements) { printf("Error reading input files %d, %d\n", errorA, errorB); return 1; } } // A + B on the device VectorAddOnDevice(A, B, C); // compute the vector addition on the CPU for comparison Vector reference = AllocateVector(VSIZE, 0); computeGold(reference.elements, A.elements, B.elements, VSIZE); // check if the device result is equivalent to the expected solution //CUTBoolean res = cutComparefe(reference.elements, C.elements, // size_elements, 0.0001f); unsigned res = 1; for (unsigned i = 0; i < size_elements; i++) if (abs(reference.elements[i] - C.elements[i]) > 0.0001f) res = 0; printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); // output result if output file is requested if(argc == 4) { WriteFile(C, argv[3]); } else if(argc == 2) { WriteFile(C, argv[1]); } // Free host matrices free(A.elements); A.elements = NULL; free(B.elements); B.elements = NULL; free(C.elements); C.elements = NULL; return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void VectorAddOnDevice(const Vector A, const Vector B, Vector C) { //Interface host call to the device kernel code and invoke the kernel //* steps: //* 1. allocate device vectors d_A, d_B and d_C with length same as input vectors Vector d_A = AllocateDeviceVector(A); Vector d_B = AllocateDeviceVector(B); Vector d_C = AllocateDeviceVector(C); //* 2. copy A to d_A, B to d_B CopyToDeviceVector(d_A,A); CopyToDeviceVector(d_B,B); //* 3. launch kernel to compute d_C = d_A + d_B VectorAddKernel<<<ceil(VSIZE/256.0), VSIZE>>>(d_A,d_B,d_C); //* 4. copy d_C back to host vector C CopyFromDeviceVector(C,d_C); //* 5. 
free device vectors d_A, d_B, d_C cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } // Allocate a device vector of same size as V. Vector AllocateDeviceVector(const Vector V) { Vector Vdevice = V; int size = V.length * sizeof(float); cudaError_t cuda_ret = cudaMalloc((void**)&Vdevice.elements, size); if(cuda_ret != cudaSuccess) { printf("Unable to allocate device memory"); exit(0); } return Vdevice; } // Allocate a vector of dimensions length // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. Vector AllocateVector(int length, int init) { Vector V; V.length = length; V.elements = NULL; V.elements = (float*) malloc(length*sizeof(float)); for(unsigned int i = 0; i < V.length; i++) { V.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX); } return V; } // Copy a host vector to a device vector. void CopyToDeviceVector(Vector Vdevice, const Vector Vhost) { int size = Vhost.length * sizeof(float); Vdevice.length = Vhost.length; cudaMemcpy(Vdevice.elements, Vhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device vector to a host vector. void CopyFromDeviceVector(Vector Vhost, const Vector Vdevice) { int size = Vdevice.length * sizeof(float); cudaMemcpy(Vhost.elements, Vdevice.elements, size, cudaMemcpyDeviceToHost); } // Read a floating point vector in from file int ReadFile(Vector* V, char* file_name) { unsigned int data_read = VSIZE; FILE* input = fopen(file_name, "r"); char vector_string[MAXLINE]; fgets(vector_string, MAXLINE, input); char* part = strtok(vector_string, " "); for (unsigned i = 0; i < VSIZE; i++) { V->elements[i] = atof(part); part = strtok(NULL, " "); } fclose(input); return data_read; } // Write a floating point vector to file void WriteFile(Vector V, char* file_name) { FILE* output = fopen(file_name, "w"); for (unsigned i = 0; i < VSIZE; i++) { fprintf(output, "%f ", V.elements[i]); } fclose(output); }
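Neither version of VectorAddOnDevice checks whether the launch or the copies succeeded. A minimal, hedged error-checking pattern that could follow the kernel launch (the macro is illustrative, not part of the original file):

// Illustrative launch check: cudaGetLastError() reports invalid launch
// configurations (for example a block size above the device limit), while
// cudaDeviceSynchronize() surfaces errors raised during kernel execution.
#define CHECK_CUDA(call) \
  do { \
    cudaError_t err_ = (call); \
    if (err_ != cudaSuccess) { \
      printf("CUDA error: %s (%s:%d)\n", cudaGetErrorString(err_), __FILE__, __LINE__); \
      exit(1); \
    } \
  } while (0)

// VectorAddKernel<<<ceil(VSIZE/256.0), VSIZE>>>(d_A, d_B, d_C);
// CHECK_CUDA(cudaGetLastError());
// CHECK_CUDA(cudaDeviceSynchronize());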
8f4c588fe4190934064170c756e9b149b15d7592.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "star3d3r-64x16-2-256_kernel.hu" #define BENCH_DIM 3 #define BENCH_FPP 37 #define BENCH_RAD 3 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1; if (scop) { if (dimsize >= 7 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) double *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 4; const AN5D_TYPE __side3Len = 52; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE 
__OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = 0.25000f * A[t%2][i][j][k] + 0.04276f * A[t%2][i][j][k-3] + 0.04176f * 
A[t%2][i][j][k-2] + 0.04076f * A[t%2][i][j][k-1] + 0.04046f * A[t%2][i][j][k+1] + 0.04146f * A[t%2][i][j][k+2] + 0.04246f * A[t%2][i][j][k+3] + 0.04096f * A[t%2][i-1][j][k] + 0.04066f * A[t%2][i+1][j][k] + 0.04086f * A[t%2][i][j-1][k] + 0.04056f * A[t%2][i][j+1][k] + 0.04196f * A[t%2][i-2][j][k] + 0.04166f * A[t%2][i+2][j][k] + 0.04186f * A[t%2][i][j-2][k] + 0.04156f * A[t%2][i][j+2][k] + 0.04296f * A[t%2][i-3][j][k] + 0.04266f * A[t%2][i+3][j][k] + 0.04286f * A[t%2][i][j-3][k] + 0.04256f * A[t%2][i][j+3][k]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
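The launch constants above appear to encode the tile shape named in star3d3r-64x16-2-256_kernel.hu: working through the first (kernel0_2) configuration with __side0Len = 2 and halo 3 reproduces a 64x16 thread block. The snippet below only restates arithmetic already present in the code:

// __OlLen     = halo * side0Len            = 3 * 2   = 6
// side2LenOl  = side2Len + 2 * __OlLen     = 4 + 12  = 16
// side3LenOl  = side3Len + 2 * __OlLen     = 52 + 12 = 64
// __blockSize = 1 * side2LenOl * side3LenOl = 16 * 64 = 1024 threads per block
static_assert(1 * (4 + 2 * (3 * 2)) * (52 + 2 * (3 * 2)) == 1024,
              "kernel0_2 tile: 64x16 with halo 3 and 2 time steps per pass");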
8f4c588fe4190934064170c756e9b149b15d7592.cu
#include <assert.h> #include <stdio.h> #include "star3d3r-64x16-2-256_kernel.hu" #define BENCH_DIM 3 #define BENCH_FPP 37 #define BENCH_RAD 3 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1; if (scop) { if (dimsize >= 7 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) double *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 4; const AN5D_TYPE __side3Len = 52; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const 
AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 10; const AN5D_TYPE __side3Len = 58; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++) A[(t+1)%2][i][j][k] = 0.25000f * A[t%2][i][j][k] + 0.04276f * A[t%2][i][j][k-3] + 0.04176f * A[t%2][i][j][k-2] + 0.04076f * A[t%2][i][j][k-1] + 0.04046f * A[t%2][i][j][k+1] + 0.04146f * A[t%2][i][j][k+2] + 0.04246f * A[t%2][i][j][k+3] + 0.04096f * A[t%2][i-1][j][k] + 0.04066f * A[t%2][i+1][j][k] + 0.04086f * A[t%2][i][j-1][k] + 0.04056f * 
A[t%2][i][j+1][k] + 0.04196f * A[t%2][i-2][j][k] + 0.04166f * A[t%2][i+2][j][k] + 0.04186f * A[t%2][i][j-2][k] + 0.04156f * A[t%2][i][j+2][k] + 0.04296f * A[t%2][i-3][j][k] + 0.04266f * A[t%2][i+3][j][k] + 0.04286f * A[t%2][i][j-3][k] + 0.04256f * A[t%2][i][j+3][k]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
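BENCH_FPP 37 in both versions is consistent with the reference loop above: a radius-3 star stencil in 3D reads the centre plus 2 * 3 neighbours along each of the three axes, i.e. 19 points, giving 19 multiplications and 18 additions per updated element. A small standalone check of that arithmetic:

// 19 coefficient multiplies + 18 additions = 37 flops per point, matching the
// BENCH_FPP value presumably consumed by the timing harness in common.h.
constexpr int kStencilPoints = 1 + 3 /*axes*/ * 2 /*directions*/ * 3 /*radius*/;  // 19
constexpr int kFlopsPerPoint = kStencilPoints + (kStencilPoints - 1);             // 37
static_assert(kFlopsPerPoint == 37, "matches BENCH_FPP");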
7c4a938d90c464076f6b341666605f70c2fb9fcc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <unistd.h> #include <hip/hip_runtime_api.h> #include <time.h> #include <errno.h> /***************************************************************************** * * * * * * Compile with: * nvcc -o 2_3_a 2_3_a.cu * * Dr Kevan Buckley, University of Wolverhampton, 2018 ****************************************************************************/ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {65.78,84.94},{80.66,124.82},{72.60,113.64},{73.25,118.85}, {87.09,111.60},{73.60,99.61},{67.41,113.22},{72.33,105.25}, {66.66,113.69},{85.69,138.86},{65.88,110.02},{74.81,100.78}, {69.99,119.69},{69.96,109.03},{66.97,95.97},{60.83,102.06}, {48.92,92.22},{31.78,70.11},{88.86,140.37},{11.76,58.21}, {84.69,117.22},{77.05,115.49},{71.92,95.04},{ 9.70,47.44}, {97.37,140.03},{92.21,125.84},{42.88,74.76},{60.45,108.15}, {80.69,101.67},{ 4.33,31.86},{78.79,118.41},{71.97,101.20}, {88.61,116.18},{76.62,111.13},{76.71,115.16},{96.03,134.40}, {92.85,134.32},{79.01,112.20},{ 4.22,21.46},{31.16,42.04}, {76.10,99.37},{93.06,117.73},{88.67,122.71},{96.27,123.05}, {58.19,83.25},{68.07,108.38},{12.86,30.19},{49.04,77.19}, {16.44,44.81},{43.75,65.50},{52.12,93.32},{29.31,69.57}, {53.84,94.57},{96.29,116.55},{50.22,88.41},{69.95,99.83}, {95.20,122.15},{34.44,69.00},{69.47,107.12},{ 0.91,28.02}, {29.66,53.79},{ 4.30,29.31},{12.95,34.18},{ 2.76,28.50}, { 6.19,23.96},{78.09,122.77},{16.65,66.25},{73.03,125.98}, {51.04,82.58},{89.00,138.15},{12.50,49.42},{71.70,114.12}, {10.26,38.81},{78.93,128.07},{30.70,46.10},{ 9.55,19.67}, {79.22,95.02},{60.48,96.79},{82.67,106.82},{57.14,91.51}, {82.19,113.94},{32.98,64.19},{72.45,129.00},{13.19,49.45}, {91.62,120.70},{86.07,114.37},{13.23,41.46},{57.73,119.52}, {63.60,97.62},{ 6.67,20.56},{83.57,119.99},{14.51,44.79}, {65.46,101.29},{69.36,91.20},{35.50,64.60},{48.64,58.92}, {84.29,111.71},{ 5.65,25.65},{54.15,72.92},{29.92,67.69}, {19.21,41.51},{91.63,121.28},{95.57,124.52},{24.65,53.85}, {61.85,84.76},{93.80,112.72},{56.37,99.04},{33.41,53.01}, {86.81,112.34},{12.88,39.42},{ 7.05,37.58},{ 5.66,38.58}, {95.20,118.02},{50.13,93.55},{ 7.89,45.65},{15.84,61.16}, { 9.40,34.02},{ 6.69,52.52},{41.84,66.84},{45.61,84.32}, {83.67,119.45},{12.89,35.99},{14.82,44.90},{46.04,81.28}, {76.55,118.50},{ 3.73,41.21},{45.36,67.25},{33.88,64.53}, {92.55,124.17},{39.86,85.34},{88.52,128.15},{49.31,70.56}, {73.49,112.64},{57.96,88.82},{63.00,99.83},{59.79,93.59}, {64.86,118.67},{ 4.18,31.42},{50.59,90.69},{88.98,139.34}, {40.11,66.24},{98.38,123.02},{57.27,105.62},{16.22,41.11}, {30.34,49.43},{87.37,140.74},{18.01,49.80},{ 2.21,11.45}, {75.21,112.46},{26.54,71.74},{74.16,107.29},{30.97,64.03}, {84.37,123.86},{41.01,77.79},{37.72,62.23},{12.93,28.26}, {54.31,73.15},{39.55,81.70},{62.89,100.66},{67.99,109.15}, {87.35,124.70},{ 1.55,31.17},{33.61,50.80},{59.69,105.76}, {32.61,60.19},{63.51,101.36},{ 1.05,23.22},{54.73,92.57}, {56.28,84.65},{80.65,108.52},{89.85,119.70},{28.04,51.66}, {46.12,73.69},{22.41,58.04},{94.15,111.90},{23.99,51.74}, {16.70,38.71},{22.92,70.77},{88.85,129.50},{ 9.37,34.93}, {13.91,49.14},{87.42,120.84},{33.75,90.66},{38.86,60.00}, {95.32,130.46},{52.82,106.97},{23.25,49.61},{70.02,101.12}, {22.57,46.22},{88.25,135.29},{85.46,118.28},{15.36,39.22}, {93.93,119.32},{44.87,72.28},{74.63,117.37},{20.47,38.88}, {58.64,104.16},{77.26,115.23},{73.81,107.74},{44.31,83.05}, 
{73.33,112.30},{76.13,101.68},{66.14,111.98},{19.30,51.44}, {83.30,126.09},{30.35,58.24},{33.53,68.47},{30.81,55.14}, {94.40,140.26},{16.30,34.96},{15.20,47.46},{41.71,80.03}, {11.08,35.60},{26.14,48.13},{25.37,69.13},{36.07,71.36}, {19.76,33.08},{45.40,68.61},{64.20,111.70},{11.05,43.83}, {35.08,46.97},{23.36,53.34},{76.49,100.85},{20.09,42.43}, {70.47,113.53},{44.40,67.48},{95.32,136.24},{58.11,86.17}, {52.80,93.70},{83.16,107.14},{70.78,122.96},{11.55,32.35}, {58.75,97.71},{52.95,77.08},{30.81,48.93},{95.85,132.94}, {44.50,71.55},{ 0.39,32.70},{34.93,71.22},{41.68,91.73}, {42.71,76.66},{87.49,114.97},{81.65,126.86},{35.88,78.86}, {42.78,93.23},{36.62,57.25},{68.42,106.48},{ 1.02,30.03}, { 1.44,23.22},{46.30,79.52},{12.22,46.90},{43.53,75.44}, { 1.50,14.47},{47.36,62.50},{75.81,112.76},{16.45,48.77}, {24.42,53.77},{13.81,35.41},{97.51,128.68},{54.79,79.80}, {47.90,76.49},{25.27,55.69},{73.38,107.88},{48.62,92.85}, {15.50,38.19},{ 2.04,26.79},{23.24,39.56},{18.89,55.69}, {15.46,35.52},{40.33,63.30},{65.85,90.79},{33.30,71.08}, {44.22,73.63},{71.67,103.74},{91.74,129.75},{82.96,106.84}, { 9.99,33.58},{95.07,132.89},{10.11,34.28},{93.17,140.73}, {79.57,113.71},{90.91,123.45},{68.53,100.33},{30.80,67.13}, {75.92,106.00},{25.37,42.29},{24.28,65.42},{12.45,38.50}, {41.10,70.73},{ 0.44,41.25},{36.74,74.70},{79.37,105.60}, {64.98,102.09},{89.88,134.54},{98.48,147.09},{15.10,36.07}, {28.95,58.48},{98.56,145.43},{53.20,89.24},{57.47,101.11}, {80.42,89.49},{99.85,131.47},{59.01,85.63},{19.64,49.58}, {24.15,49.11},{19.15,47.59},{91.22,124.16},{20.49,61.47}, {40.40,61.57},{86.76,127.32},{67.21,104.20},{35.84,59.15}, { 2.66,39.39},{80.93,113.88},{25.54,68.66},{20.10,68.51}, {12.45,43.00},{ 0.98,19.94},{21.20,53.05},{90.33,114.89}, {32.52,65.61},{63.07,102.47},{69.59,120.30},{66.46,88.79}, { 4.45,40.60},{46.32,60.06},{ 7.53,16.18},{ 9.71,42.26}, {27.23,42.86},{27.76,47.88},{21.45,19.98},{52.37,84.64}, {45.27,86.31},{67.86,99.14},{ 3.11,29.83},{72.59,88.63}, {91.71,132.76},{63.40,112.65},{33.54,67.40},{36.53,74.07}, {78.84,100.02},{ 4.10,21.67},{42.67,71.97},{14.22,46.05}, {45.89,66.53},{12.11,36.29},{69.36,117.95},{52.72,83.50}, {53.09,92.41},{28.52,62.94},{52.89,99.94},{14.36,49.70}, {33.54,58.50},{29.36,64.05},{54.32,79.41},{91.29,135.17}, {68.97,95.29},{60.95,93.37},{38.01,52.19},{13.66,36.57}, { 0.13,30.87},{92.37,115.09},{87.79,130.79},{87.56,130.04}, {84.52,119.77},{54.18,91.22},{40.68,84.46},{99.57,143.57}, {43.30,66.25},{81.93,118.24},{27.55,66.43},{47.14,54.18}, { 8.23,45.57},{12.73,42.70},{36.24,60.69},{88.02,121.50}, {68.71,111.61},{61.03,87.77},{96.08,134.20},{53.94,70.89}, {89.93,130.48},{81.50,108.78},{60.53,80.79},{49.01,79.16}, {75.17,103.32},{41.22,79.69},{29.31,56.01},{48.56,89.38}, {47.35,63.86},{47.29,84.39},{74.83,122.45},{82.09,128.53}, {16.68,57.09},{27.78,69.23},{61.28,82.19},{68.66,111.12}, {34.78,66.88},{ 3.85,12.89},{48.94,78.61},{66.81,97.84}, {36.29,86.04},{27.89,58.21},{91.55,142.66},{15.07,61.46}, {49.66,77.69},{45.34,66.40},{92.36,126.32},{83.72,114.99}, {74.22,98.96},{52.59,63.50},{57.40,89.25},{ 8.11,28.79}, {70.64,98.18},{28.97,64.93},{59.06,97.86},{26.78,55.09}, {30.61,66.34},{86.07,125.23},{63.69,101.98},{34.21,57.52}, {98.15,125.37},{67.61,124.80},{85.06,124.63},{68.99,94.71}, {56.65,88.43},{48.26,76.38},{37.47,109.00},{56.65,123.41}, {24.77,52.15},{37.08,69.44},{57.73,99.72},{ 5.72,30.64}, {53.54,87.65},{59.89,102.05},{85.23,132.20},{52.18,103.33}, {66.06,102.25},{78.83,119.47},{38.97,82.64},{13.94,21.20}, {45.53,48.05},{89.34,131.71},{84.58,130.87},{ 4.58,37.47}, 
{44.91,54.60},{31.07,66.31},{ 6.41,34.58},{46.37,67.66}, {88.40,120.11},{53.28,75.24},{88.74,128.67},{10.45,37.74}, {65.52,97.32},{62.16,103.38},{72.06,113.98},{53.36,82.72}, {27.69,61.02},{ 5.32,10.52},{22.18,54.06},{32.44,49.68}, { 1.20,38.95},{77.84,119.69},{18.07,37.00},{ 5.27,41.43}, {33.05,65.31},{37.46,55.62},{82.73,124.56},{ 8.11,42.31}, {72.77,103.88},{27.88,44.33},{16.10,29.03},{18.27,59.33}, {54.84,81.40},{68.63,97.16},{ 6.73,34.02},{34.76,73.62}, {43.35,80.93},{82.83,117.96},{22.36,40.83},{ 6.34,46.03}, {52.15,83.33},{52.83,72.35},{26.59,52.77},{38.87,68.02}, {12.23,47.63},{36.72,68.58},{42.18,90.26},{29.34,60.82}, {37.62,86.68},{19.33,27.92},{73.34,108.09},{48.93,73.80}, {72.41,126.86},{54.47,98.81},{35.09,62.59},{46.61,67.81}, {69.08,103.82},{79.73,107.37},{36.27,56.49},{30.85,43.20}, { 8.50,43.86},{59.02,88.38},{69.07,113.97},{17.12,54.82}, {31.09,73.11},{75.12,109.39},{56.58,85.07},{89.64,131.91}, {46.48,90.29},{66.02,113.76},{97.52,123.13},{57.37,83.39}, {37.12,72.21},{ 5.99,35.25},{ 2.90,32.47},{67.30,89.07}, {40.80,65.62},{66.03,103.43},{86.40,105.31},{35.97,72.43}, {52.88,92.48},{65.37,99.35},{97.88,123.46},{42.45,64.70}, {82.41,121.35},{25.18,60.12},{47.93,84.65},{70.53,103.81}, {72.03,103.51},{51.72,70.08},{99.56,142.75},{44.50,70.40}, { 7.72,27.82},{91.01,131.50},{ 8.81,43.72},{11.42,45.66}, {48.85,83.83},{32.25,62.82},{63.74,107.36},{70.70,122.82}, {26.26,62.04},{30.79,64.38},{46.16,80.00},{80.20,144.95}, {51.88,92.05},{ 7.65,37.11},{93.77,115.78},{84.86,132.34}, {92.24,127.84},{50.25,67.89},{25.80,65.91},{81.90,116.78}, {62.26,109.25},{16.44,44.66},{41.08,69.20},{40.91,72.34}, {57.65,103.90},{14.23,53.28},{53.61,80.02},{97.67,116.54}, {41.67,62.16},{89.94,129.01},{47.12,83.97},{ 7.64,29.90}, {39.02,64.24},{81.77,131.44},{65.39,97.70},{65.69,98.38}, {67.20,105.36},{ 1.19,22.92},{90.92,131.26},{ 5.65,13.89}, {75.56,104.27},{29.76,67.34},{ 1.91,43.49},{ 8.29,43.45}, {20.48,41.18},{33.63,70.78},{37.38,70.45},{ 5.48,28.78}, {75.75,120.92},{82.28,135.33},{14.77,74.49},{39.10,83.36}, { 8.86,40.33},{75.41,113.16},{57.56,91.58},{89.33,120.10}, {15.92,30.37},{79.65,114.27},{33.63,64.56},{56.81,84.99}, { 0.68,26.24},{79.57,119.83},{22.91,69.49},{ 9.88,42.80}, { 9.15,45.84},{51.81,87.55},{32.13,89.86},{71.92,113.29}, { 4.74,28.94},{21.16,63.23},{45.47,71.86},{84.21,117.65}, {18.69,64.74},{31.99,47.79},{46.30,66.98},{ 1.12,37.93}, {83.86,134.58},{28.49,75.14},{52.66,63.86},{54.97,85.99}, { 4.79,24.49},{58.55,93.05},{67.40,90.61},{ 4.35,25.67}, {21.58,32.08},{75.69,108.42},{69.79,107.03},{40.41,60.86}, {49.51,82.66},{54.77,92.15},{95.14,116.82},{20.44,39.46}, {16.47,45.72},{90.97,115.53},{ 1.27,36.57},{ 4.98,47.82}, { 3.37,42.17},{87.25,110.00},{68.97,91.94},{66.31,109.22}, {67.60,76.86},{17.43,57.61},{68.35,84.18},{84.88,123.97}, {48.31,78.55},{56.03,91.48},{37.08,52.78},{20.44,48.80}, {21.10,32.35},{63.38,81.85},{97.99,121.65},{29.04,63.52}, {94.79,126.70},{98.68,131.02},{66.07,94.72},{ 0.58,49.60}, {30.35,70.60},{71.38,102.31},{10.39,32.46},{90.88,125.15}, {54.73,80.33},{18.04,55.48},{54.19,84.82},{87.33,128.97}, {20.11,58.77},{88.69,135.20},{50.73,73.95},{41.50,65.20}, {52.09,76.29},{61.36,85.27},{86.80,127.70},{48.84,71.02}, {86.20,130.25},{28.01,73.72},{28.93,68.22},{ 0.93,17.86}, {60.92,103.87},{34.30,72.21},{ 8.65,41.15},{69.58,109.43}, {33.85,64.67},{64.17,83.59},{ 2.17,29.56},{26.90,71.34}, {17.83,44.07},{38.24,65.62},{77.71,104.20},{75.90,126.26}, {42.28,76.92},{62.47,96.95},{ 4.76,24.44},{15.54,50.13}, {79.25,97.12},{70.46,113.54},{ 7.16,32.16},{36.08,62.26}, 
{96.38,141.09},{71.33,109.52},{56.85,85.48},{87.62,120.82}, {25.45,67.87},{ 6.92,26.77},{71.90,93.01},{45.46,70.53}, {59.16,81.85},{87.99,108.31},{ 0.24,14.80},{81.47,111.72}, {35.34,40.20},{61.84,76.32},{85.96,128.15},{46.62,85.33}, {38.73,60.24},{22.65,26.64},{45.10,69.32},{10.81,55.39}, {72.16,113.00},{ 3.64,40.11},{44.44,93.04},{59.83,102.08}, {33.07,53.38},{88.30,114.97},{16.94,35.62},{58.77,91.32}, {58.84,80.83},{57.79,109.11},{82.11,99.22},{65.81,97.26}, {56.46,89.24},{98.54,138.29},{48.82,72.65},{11.74,38.78}, {89.73,122.59},{14.75,42.60},{85.57,122.56},{16.54,52.18}, {29.02,59.69},{37.02,62.00},{49.71,85.41},{17.62,39.28}, {35.96,73.54},{22.96,46.01},{28.48,74.16},{63.80,98.66}, {82.33,111.20},{12.18,47.98},{51.29,81.89},{86.83,120.17}, {38.94,75.34},{16.84,60.70},{21.13,56.89},{89.28,102.86}, {49.60,86.24},{96.19,148.10},{55.93,102.19},{93.10,118.80}, {66.87,106.87},{21.23,52.69},{19.15,38.20},{49.28,89.46}, {58.54,82.41},{ 8.61,20.99},{46.80,64.82},{21.45,39.17}, {98.06,162.09},{33.55,76.96},{21.72,47.03},{30.75,63.04}, {70.31,108.72},{11.38,36.57},{16.03,41.65},{89.58,132.84}, {82.09,118.12},{90.12,101.79},{62.86,88.28},{63.01,96.38}, {66.04,96.24},{23.56,53.42},{69.99,103.91},{ 2.29,38.48}, {23.46,55.17},{73.82,111.65},{63.50,106.29},{24.67,50.27}, {93.34,116.02},{49.77,83.52},{65.01,90.02},{74.38,119.88}, {98.05,137.44},{60.50,112.18},{17.75,46.40},{48.77,66.10}, { 9.87,36.71},{41.19,60.68},{11.91,42.76},{94.89,143.01}, {38.94,74.93},{18.64,38.54},{66.85,106.14},{46.23,83.12}, {14.12,55.73},{48.36,86.23},{37.79,68.52},{57.58,90.10}, {52.82,103.65},{22.80,54.35},{96.37,128.93},{36.01,71.90}, {56.61,87.72},{28.47,42.19},{97.85,132.03},{54.71,85.06}, {59.97,93.65},{12.80,33.33},{89.46,118.05},{67.74,99.10}, {72.96,110.21},{ 3.37,30.79},{61.08,87.34},{30.27,54.07}, {80.44,113.77},{61.79,107.35},{16.80,36.25},{36.11,87.10}, {23.47,34.65},{42.93,67.18},{59.81,100.74},{ 9.83,31.58}, {20.50,52.04},{69.02,98.83},{68.99,116.05},{86.70,98.10}, {18.14,55.01},{99.32,132.01},{93.04,116.40},{73.44,103.58}, {54.85,93.77},{30.30,73.03},{42.70,69.68},{28.74,47.53}, {36.70,58.24},{ 2.25,20.30},{31.58,69.69},{50.11,73.27}, {15.12,44.77},{94.15,137.43},{56.68,80.90},{96.46,129.14}, { 0.43,29.74},{71.62,117.72},{59.58,96.36},{84.18,129.18}, {41.01,83.94},{37.25,81.88},{52.97,76.90},{82.33,117.77}, {93.01,125.58},{19.82,42.13},{15.34,66.30},{10.41,32.21}, {15.46,27.21},{84.50,112.50},{95.28,139.45},{97.08,127.88}, {47.62,68.52},{22.22,39.32},{35.06,48.35},{98.97,149.30}, {31.27,50.78},{97.44,131.59},{15.17,56.41},{10.45,38.70}, {25.33,58.60},{ 2.11,30.71},{19.70,50.07},{76.22,116.69}, { 3.03, 8.54},{75.53,114.39},{ 7.84,23.87},{38.08,64.11}, {15.47,35.63},{50.02,93.93},{33.98,66.41},{79.23,114.48}, {68.81,96.68},{50.48,97.02},{24.41,57.24},{97.06,131.69}, {27.59,57.96},{ 7.43,36.04},{26.22,54.52},{51.16,91.41}, {55.65,98.61},{84.28,120.89},{42.51,67.14},{28.43,64.76}, { 1.04,56.55},{35.33,73.84},{27.15,59.22},{70.41,102.22}, {88.04,124.07},{19.41,29.02},{22.28,53.20},{88.19,122.10}, {50.02,106.03},{56.03,95.14},{31.24,51.95},{64.07,101.28}, {78.51,103.60},{32.47,68.50},{67.91,110.01},{67.66,93.38}, {63.05,105.49},{ 3.19,43.07},{57.24,101.65},{69.49,97.46}, {49.58,85.88},{94.65,130.13},{54.50,88.30},{43.62,89.43}, {57.97,83.41},{43.13,83.20},{21.00,44.82},{59.35,74.14}, {57.82,75.53},{77.34,97.47},{30.29,64.36},{49.88,84.15}, {41.55,81.68},{96.27,142.28},{54.18,64.01},{78.95,122.31}, {96.53,131.50},{96.33,117.12},{59.78,74.57},{25.86,55.66}, 
{93.50,136.69},{84.76,119.37},{73.14,106.46},{48.07,74.40}, {22.53,54.86},{ 6.39,42.39},{62.43,81.91},{45.44,67.17}, {76.81,116.23},{94.19,127.34},{31.03,55.92},{21.76,36.42}, {32.47,61.13},{70.47,85.93},{23.19,54.75},{81.57,122.35}, {96.74,134.52},{ 9.15,51.97},{89.90,118.11},{ 2.77,33.70}, { 3.36,29.82},{31.95,64.99},{11.11,25.57},{30.51,46.15}, {22.58,56.37},{60.04,86.98},{64.42,92.98},{ 4.02,28.30}, {52.93,105.09},{68.61,100.56},{97.57,140.89},{91.88,132.20}, { 8.89,35.30},{64.23,94.59},{93.45,139.06},{37.62,44.86}, {14.43,51.46},{32.21,84.10},{80.69,127.51},{33.19,73.49}, { 1.40,36.45},{76.65,107.98},{93.43,122.99},{88.91,121.39}, {81.95,120.81},{20.32,42.43},{56.95,87.28},{80.09,111.05}, {83.63,129.54},{75.02,109.92},{73.08,117.07},{35.44,71.25}, { 7.84,30.65},{33.31,72.21},{68.75,95.19},{41.02,77.76}, {69.90,102.93},{80.38,121.94},{77.02,117.53},{47.01,82.60}, {28.49,64.08},{73.36,117.31},{37.29,73.73},{28.05,57.95}, {71.58,118.36},{30.60,59.11},{ 1.13,14.84},{29.99,54.38}, {15.21,47.19},{14.12,21.18},{97.73,133.90},{69.66,95.75} }; double residual_error (double x , double y , double m , double c){ double e = (m*x) +c - y; return e * e; } __device__ double d_residual_error (double x , double y , double m , double c){ double e = (m*x) +c - y; return e*e; } double rms_error (double m , double c){ int i; double mean; double error_sum =0; for (i=0; i<n_data; i++){ error_sum += residual_error(data[i].x,data [i].y,m,c); } mean = error_sum / n_data; return sqrt (mean); } __global__ void d_rms_error (double *m , double *c, double *error_sum_arr, point_t *d_data){ int i = threadIdx.x + blockIdx.x * blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipError_t error; //Device variables double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be = rms_error(bm, bc); error = hipMalloc(&d_dm, (sizeof(double) * 8)); if(error){ fprintf(stderr, "hipMalloc on d_dm returned %d %s\n", error, hipGetErrorString(error)); exit(1); } //Allocate memory for d_dc error = hipMalloc(&d_dc, (sizeof(double) * 8)); if(error){ fprintf(stderr, "hipMalloc on d_dc returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipMalloc(&d_error_sum_arr, (sizeof(double) * 1000)); if(error){ fprintf(stderr, "hipMalloc on d_error_sum_arr returned %d %s\n", error, hipGetErrorString(error)); exit(1); } //Allocate memory for d_data error = hipMalloc(&d_data, sizeof(data)); if(error){ fprintf(stderr, "hipMalloc on d_data returned %d %s\n", error, hipGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i] = bc + (oc[i] * step); } //Copy memory for dm to d_dm error = hipMemcpy(d_dm, dm, (sizeof(double) * 8), hipMemcpyHostToDevice); if(error){ fprintf(stderr, "hipMemcpy to d_dm returned %d %s\n", error, hipGetErrorString(error)); } //Copy memory for 
dc to d_dc error = hipMemcpy(d_dc, dc, (sizeof(double) * 8), hipMemcpyHostToDevice); if(error){ fprintf(stderr, "hipMemcpy to d_dc returned %d %s\n", error, hipGetErrorString(error)); } //Copy memory for data to d_data error = hipMemcpy(d_data, data, sizeof(data), hipMemcpyHostToDevice); if(error){ fprintf(stderr, "hipMemcpy to d_data returned %d %s\n", error, hipGetErrorString(error)); } for(i=0;i<8;i++) { //Host variable storing the array returned from the kernel function. double h_error_sum_arr[1000]; //Stores the total sum of the values from the error sum array. double error_sum_total; //Stores the mean of the total sum of the error sums. double error_sum_mean; //Call the rms_error function using 100 blocks and 10 threads. hipLaunchKernelGGL(( d_rms_error) , dim3(100),dim3(10), 0, 0, &d_dm[i], &d_dc[i], d_error_sum_arr, d_data); hipDeviceSynchronize(); //Copy memory for d_error_sum_arr error = hipMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), hipMemcpyDeviceToHost); if(error){ fprintf(stderr, "hipMemcpy to error_sum returned %d %s\n", error, hipGetErrorString(error)); } //Loop through the error sum array returned from the kernel function for(int j=0; j<n_data; j++) { //Add each error sum to the error sum total. error_sum_total += h_error_sum_arr[j]; } //Calculate the mean for the error sum. error_sum_mean = error_sum_total / n_data; //Calculate the square root for the error sum mean. e[i] = sqrt(error_sum_mean); if(e[i] < best_error) { best_error = e[i]; best_error_i = i; } //Reset the error sum total. error_sum_total = 0; } //printf("best m,c is %lf,%lf with error %lf in direction %d\n", //dm[best_error_i], dc[best_error_i], best_error, best_error_i); if(best_error < be) { be = best_error; bm = dm[best_error_i]; bc = dc[best_error_i]; } else { minimum_found = 1; } } //Free memory for d_dm error = hipFree(d_dm); if(error){ fprintf(stderr, "hipFree on d_dm returned %d %s\n", error, hipGetErrorString(error)); exit(1); } //Free memory for d_dc error = hipFree(d_dc); if(error){ fprintf(stderr, "hipFree on d_dc returned %d %s\n", error, hipGetErrorString(error)); exit(1); } //Free memory for d_data error = hipFree(d_data); if(error){ fprintf(stderr, "hipFree on d_data returned %d %s\n", error, hipGetErrorString(error)); exit(1); } //Free memory for d_error_sum_arr error = hipFree(d_error_sum_arr); if(error){ fprintf(stderr, "hipFree on d_error_sum_arr returned %d %s\n", error, hipGetErrorString(error)); exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); //Get the system time after we have run the linear regression function. clock_gettime(CLOCK_MONOTONIC, &finish); //Calculate the time spent between the start time and end time. time_difference(&start, &finish, &time_elapsed); //Output the time spent running the program. printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
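The host-side loop above sums the per-thread squared residuals into error_sum_total, but that accumulator is declared without an initializer and is only reset to 0 at the end of each pass, so its first use is undefined. A minimal sketch of the same reduction with the accumulator zeroed up front; rms_from_partials is a hypothetical helper name, not part of the file above.

#include <math.h>  /* for sqrt(); the listing above already includes it */

static double rms_from_partials(const double *h_error_sum_arr, int n)
{
    double error_sum_total = 0.0;              /* zero before accumulating */
    for (int j = 0; j < n; j++) {
        error_sum_total += h_error_sum_arr[j]; /* sum of squared residuals */
    }
    return sqrt(error_sum_total / n);          /* RMS, matching rms_error() */
}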
7c4a938d90c464076f6b341666605f70c2fb9fcc.cu
#include <stdio.h> #include <math.h> #include <unistd.h> #include <cuda_runtime_api.h> #include <time.h> #include <errno.h> /***************************************************************************** * * * * * * Compile with: * nvcc -o 2_3_a 2_3_a.cu * * Dr Kevan Buckley, University of Wolverhampton, 2018 ****************************************************************************/ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {65.78,84.94},{80.66,124.82},{72.60,113.64},{73.25,118.85}, {87.09,111.60},{73.60,99.61},{67.41,113.22},{72.33,105.25}, {66.66,113.69},{85.69,138.86},{65.88,110.02},{74.81,100.78}, {69.99,119.69},{69.96,109.03},{66.97,95.97},{60.83,102.06}, {48.92,92.22},{31.78,70.11},{88.86,140.37},{11.76,58.21}, {84.69,117.22},{77.05,115.49},{71.92,95.04},{ 9.70,47.44}, {97.37,140.03},{92.21,125.84},{42.88,74.76},{60.45,108.15}, {80.69,101.67},{ 4.33,31.86},{78.79,118.41},{71.97,101.20}, {88.61,116.18},{76.62,111.13},{76.71,115.16},{96.03,134.40}, {92.85,134.32},{79.01,112.20},{ 4.22,21.46},{31.16,42.04}, {76.10,99.37},{93.06,117.73},{88.67,122.71},{96.27,123.05}, {58.19,83.25},{68.07,108.38},{12.86,30.19},{49.04,77.19}, {16.44,44.81},{43.75,65.50},{52.12,93.32},{29.31,69.57}, {53.84,94.57},{96.29,116.55},{50.22,88.41},{69.95,99.83}, {95.20,122.15},{34.44,69.00},{69.47,107.12},{ 0.91,28.02}, {29.66,53.79},{ 4.30,29.31},{12.95,34.18},{ 2.76,28.50}, { 6.19,23.96},{78.09,122.77},{16.65,66.25},{73.03,125.98}, {51.04,82.58},{89.00,138.15},{12.50,49.42},{71.70,114.12}, {10.26,38.81},{78.93,128.07},{30.70,46.10},{ 9.55,19.67}, {79.22,95.02},{60.48,96.79},{82.67,106.82},{57.14,91.51}, {82.19,113.94},{32.98,64.19},{72.45,129.00},{13.19,49.45}, {91.62,120.70},{86.07,114.37},{13.23,41.46},{57.73,119.52}, {63.60,97.62},{ 6.67,20.56},{83.57,119.99},{14.51,44.79}, {65.46,101.29},{69.36,91.20},{35.50,64.60},{48.64,58.92}, {84.29,111.71},{ 5.65,25.65},{54.15,72.92},{29.92,67.69}, {19.21,41.51},{91.63,121.28},{95.57,124.52},{24.65,53.85}, {61.85,84.76},{93.80,112.72},{56.37,99.04},{33.41,53.01}, {86.81,112.34},{12.88,39.42},{ 7.05,37.58},{ 5.66,38.58}, {95.20,118.02},{50.13,93.55},{ 7.89,45.65},{15.84,61.16}, { 9.40,34.02},{ 6.69,52.52},{41.84,66.84},{45.61,84.32}, {83.67,119.45},{12.89,35.99},{14.82,44.90},{46.04,81.28}, {76.55,118.50},{ 3.73,41.21},{45.36,67.25},{33.88,64.53}, {92.55,124.17},{39.86,85.34},{88.52,128.15},{49.31,70.56}, {73.49,112.64},{57.96,88.82},{63.00,99.83},{59.79,93.59}, {64.86,118.67},{ 4.18,31.42},{50.59,90.69},{88.98,139.34}, {40.11,66.24},{98.38,123.02},{57.27,105.62},{16.22,41.11}, {30.34,49.43},{87.37,140.74},{18.01,49.80},{ 2.21,11.45}, {75.21,112.46},{26.54,71.74},{74.16,107.29},{30.97,64.03}, {84.37,123.86},{41.01,77.79},{37.72,62.23},{12.93,28.26}, {54.31,73.15},{39.55,81.70},{62.89,100.66},{67.99,109.15}, {87.35,124.70},{ 1.55,31.17},{33.61,50.80},{59.69,105.76}, {32.61,60.19},{63.51,101.36},{ 1.05,23.22},{54.73,92.57}, {56.28,84.65},{80.65,108.52},{89.85,119.70},{28.04,51.66}, {46.12,73.69},{22.41,58.04},{94.15,111.90},{23.99,51.74}, {16.70,38.71},{22.92,70.77},{88.85,129.50},{ 9.37,34.93}, {13.91,49.14},{87.42,120.84},{33.75,90.66},{38.86,60.00}, {95.32,130.46},{52.82,106.97},{23.25,49.61},{70.02,101.12}, {22.57,46.22},{88.25,135.29},{85.46,118.28},{15.36,39.22}, {93.93,119.32},{44.87,72.28},{74.63,117.37},{20.47,38.88}, {58.64,104.16},{77.26,115.23},{73.81,107.74},{44.31,83.05}, {73.33,112.30},{76.13,101.68},{66.14,111.98},{19.30,51.44}, 
{83.30,126.09},{30.35,58.24},{33.53,68.47},{30.81,55.14}, {94.40,140.26},{16.30,34.96},{15.20,47.46},{41.71,80.03}, {11.08,35.60},{26.14,48.13},{25.37,69.13},{36.07,71.36}, {19.76,33.08},{45.40,68.61},{64.20,111.70},{11.05,43.83}, {35.08,46.97},{23.36,53.34},{76.49,100.85},{20.09,42.43}, {70.47,113.53},{44.40,67.48},{95.32,136.24},{58.11,86.17}, {52.80,93.70},{83.16,107.14},{70.78,122.96},{11.55,32.35}, {58.75,97.71},{52.95,77.08},{30.81,48.93},{95.85,132.94}, {44.50,71.55},{ 0.39,32.70},{34.93,71.22},{41.68,91.73}, {42.71,76.66},{87.49,114.97},{81.65,126.86},{35.88,78.86}, {42.78,93.23},{36.62,57.25},{68.42,106.48},{ 1.02,30.03}, { 1.44,23.22},{46.30,79.52},{12.22,46.90},{43.53,75.44}, { 1.50,14.47},{47.36,62.50},{75.81,112.76},{16.45,48.77}, {24.42,53.77},{13.81,35.41},{97.51,128.68},{54.79,79.80}, {47.90,76.49},{25.27,55.69},{73.38,107.88},{48.62,92.85}, {15.50,38.19},{ 2.04,26.79},{23.24,39.56},{18.89,55.69}, {15.46,35.52},{40.33,63.30},{65.85,90.79},{33.30,71.08}, {44.22,73.63},{71.67,103.74},{91.74,129.75},{82.96,106.84}, { 9.99,33.58},{95.07,132.89},{10.11,34.28},{93.17,140.73}, {79.57,113.71},{90.91,123.45},{68.53,100.33},{30.80,67.13}, {75.92,106.00},{25.37,42.29},{24.28,65.42},{12.45,38.50}, {41.10,70.73},{ 0.44,41.25},{36.74,74.70},{79.37,105.60}, {64.98,102.09},{89.88,134.54},{98.48,147.09},{15.10,36.07}, {28.95,58.48},{98.56,145.43},{53.20,89.24},{57.47,101.11}, {80.42,89.49},{99.85,131.47},{59.01,85.63},{19.64,49.58}, {24.15,49.11},{19.15,47.59},{91.22,124.16},{20.49,61.47}, {40.40,61.57},{86.76,127.32},{67.21,104.20},{35.84,59.15}, { 2.66,39.39},{80.93,113.88},{25.54,68.66},{20.10,68.51}, {12.45,43.00},{ 0.98,19.94},{21.20,53.05},{90.33,114.89}, {32.52,65.61},{63.07,102.47},{69.59,120.30},{66.46,88.79}, { 4.45,40.60},{46.32,60.06},{ 7.53,16.18},{ 9.71,42.26}, {27.23,42.86},{27.76,47.88},{21.45,19.98},{52.37,84.64}, {45.27,86.31},{67.86,99.14},{ 3.11,29.83},{72.59,88.63}, {91.71,132.76},{63.40,112.65},{33.54,67.40},{36.53,74.07}, {78.84,100.02},{ 4.10,21.67},{42.67,71.97},{14.22,46.05}, {45.89,66.53},{12.11,36.29},{69.36,117.95},{52.72,83.50}, {53.09,92.41},{28.52,62.94},{52.89,99.94},{14.36,49.70}, {33.54,58.50},{29.36,64.05},{54.32,79.41},{91.29,135.17}, {68.97,95.29},{60.95,93.37},{38.01,52.19},{13.66,36.57}, { 0.13,30.87},{92.37,115.09},{87.79,130.79},{87.56,130.04}, {84.52,119.77},{54.18,91.22},{40.68,84.46},{99.57,143.57}, {43.30,66.25},{81.93,118.24},{27.55,66.43},{47.14,54.18}, { 8.23,45.57},{12.73,42.70},{36.24,60.69},{88.02,121.50}, {68.71,111.61},{61.03,87.77},{96.08,134.20},{53.94,70.89}, {89.93,130.48},{81.50,108.78},{60.53,80.79},{49.01,79.16}, {75.17,103.32},{41.22,79.69},{29.31,56.01},{48.56,89.38}, {47.35,63.86},{47.29,84.39},{74.83,122.45},{82.09,128.53}, {16.68,57.09},{27.78,69.23},{61.28,82.19},{68.66,111.12}, {34.78,66.88},{ 3.85,12.89},{48.94,78.61},{66.81,97.84}, {36.29,86.04},{27.89,58.21},{91.55,142.66},{15.07,61.46}, {49.66,77.69},{45.34,66.40},{92.36,126.32},{83.72,114.99}, {74.22,98.96},{52.59,63.50},{57.40,89.25},{ 8.11,28.79}, {70.64,98.18},{28.97,64.93},{59.06,97.86},{26.78,55.09}, {30.61,66.34},{86.07,125.23},{63.69,101.98},{34.21,57.52}, {98.15,125.37},{67.61,124.80},{85.06,124.63},{68.99,94.71}, {56.65,88.43},{48.26,76.38},{37.47,109.00},{56.65,123.41}, {24.77,52.15},{37.08,69.44},{57.73,99.72},{ 5.72,30.64}, {53.54,87.65},{59.89,102.05},{85.23,132.20},{52.18,103.33}, {66.06,102.25},{78.83,119.47},{38.97,82.64},{13.94,21.20}, {45.53,48.05},{89.34,131.71},{84.58,130.87},{ 4.58,37.47}, {44.91,54.60},{31.07,66.31},{ 6.41,34.58},{46.37,67.66}, 
{88.40,120.11},{53.28,75.24},{88.74,128.67},{10.45,37.74}, {65.52,97.32},{62.16,103.38},{72.06,113.98},{53.36,82.72}, {27.69,61.02},{ 5.32,10.52},{22.18,54.06},{32.44,49.68}, { 1.20,38.95},{77.84,119.69},{18.07,37.00},{ 5.27,41.43}, {33.05,65.31},{37.46,55.62},{82.73,124.56},{ 8.11,42.31}, {72.77,103.88},{27.88,44.33},{16.10,29.03},{18.27,59.33}, {54.84,81.40},{68.63,97.16},{ 6.73,34.02},{34.76,73.62}, {43.35,80.93},{82.83,117.96},{22.36,40.83},{ 6.34,46.03}, {52.15,83.33},{52.83,72.35},{26.59,52.77},{38.87,68.02}, {12.23,47.63},{36.72,68.58},{42.18,90.26},{29.34,60.82}, {37.62,86.68},{19.33,27.92},{73.34,108.09},{48.93,73.80}, {72.41,126.86},{54.47,98.81},{35.09,62.59},{46.61,67.81}, {69.08,103.82},{79.73,107.37},{36.27,56.49},{30.85,43.20}, { 8.50,43.86},{59.02,88.38},{69.07,113.97},{17.12,54.82}, {31.09,73.11},{75.12,109.39},{56.58,85.07},{89.64,131.91}, {46.48,90.29},{66.02,113.76},{97.52,123.13},{57.37,83.39}, {37.12,72.21},{ 5.99,35.25},{ 2.90,32.47},{67.30,89.07}, {40.80,65.62},{66.03,103.43},{86.40,105.31},{35.97,72.43}, {52.88,92.48},{65.37,99.35},{97.88,123.46},{42.45,64.70}, {82.41,121.35},{25.18,60.12},{47.93,84.65},{70.53,103.81}, {72.03,103.51},{51.72,70.08},{99.56,142.75},{44.50,70.40}, { 7.72,27.82},{91.01,131.50},{ 8.81,43.72},{11.42,45.66}, {48.85,83.83},{32.25,62.82},{63.74,107.36},{70.70,122.82}, {26.26,62.04},{30.79,64.38},{46.16,80.00},{80.20,144.95}, {51.88,92.05},{ 7.65,37.11},{93.77,115.78},{84.86,132.34}, {92.24,127.84},{50.25,67.89},{25.80,65.91},{81.90,116.78}, {62.26,109.25},{16.44,44.66},{41.08,69.20},{40.91,72.34}, {57.65,103.90},{14.23,53.28},{53.61,80.02},{97.67,116.54}, {41.67,62.16},{89.94,129.01},{47.12,83.97},{ 7.64,29.90}, {39.02,64.24},{81.77,131.44},{65.39,97.70},{65.69,98.38}, {67.20,105.36},{ 1.19,22.92},{90.92,131.26},{ 5.65,13.89}, {75.56,104.27},{29.76,67.34},{ 1.91,43.49},{ 8.29,43.45}, {20.48,41.18},{33.63,70.78},{37.38,70.45},{ 5.48,28.78}, {75.75,120.92},{82.28,135.33},{14.77,74.49},{39.10,83.36}, { 8.86,40.33},{75.41,113.16},{57.56,91.58},{89.33,120.10}, {15.92,30.37},{79.65,114.27},{33.63,64.56},{56.81,84.99}, { 0.68,26.24},{79.57,119.83},{22.91,69.49},{ 9.88,42.80}, { 9.15,45.84},{51.81,87.55},{32.13,89.86},{71.92,113.29}, { 4.74,28.94},{21.16,63.23},{45.47,71.86},{84.21,117.65}, {18.69,64.74},{31.99,47.79},{46.30,66.98},{ 1.12,37.93}, {83.86,134.58},{28.49,75.14},{52.66,63.86},{54.97,85.99}, { 4.79,24.49},{58.55,93.05},{67.40,90.61},{ 4.35,25.67}, {21.58,32.08},{75.69,108.42},{69.79,107.03},{40.41,60.86}, {49.51,82.66},{54.77,92.15},{95.14,116.82},{20.44,39.46}, {16.47,45.72},{90.97,115.53},{ 1.27,36.57},{ 4.98,47.82}, { 3.37,42.17},{87.25,110.00},{68.97,91.94},{66.31,109.22}, {67.60,76.86},{17.43,57.61},{68.35,84.18},{84.88,123.97}, {48.31,78.55},{56.03,91.48},{37.08,52.78},{20.44,48.80}, {21.10,32.35},{63.38,81.85},{97.99,121.65},{29.04,63.52}, {94.79,126.70},{98.68,131.02},{66.07,94.72},{ 0.58,49.60}, {30.35,70.60},{71.38,102.31},{10.39,32.46},{90.88,125.15}, {54.73,80.33},{18.04,55.48},{54.19,84.82},{87.33,128.97}, {20.11,58.77},{88.69,135.20},{50.73,73.95},{41.50,65.20}, {52.09,76.29},{61.36,85.27},{86.80,127.70},{48.84,71.02}, {86.20,130.25},{28.01,73.72},{28.93,68.22},{ 0.93,17.86}, {60.92,103.87},{34.30,72.21},{ 8.65,41.15},{69.58,109.43}, {33.85,64.67},{64.17,83.59},{ 2.17,29.56},{26.90,71.34}, {17.83,44.07},{38.24,65.62},{77.71,104.20},{75.90,126.26}, {42.28,76.92},{62.47,96.95},{ 4.76,24.44},{15.54,50.13}, {79.25,97.12},{70.46,113.54},{ 7.16,32.16},{36.08,62.26}, {96.38,141.09},{71.33,109.52},{56.85,85.48},{87.62,120.82}, 
{25.45,67.87},{ 6.92,26.77},{71.90,93.01},{45.46,70.53}, {59.16,81.85},{87.99,108.31},{ 0.24,14.80},{81.47,111.72}, {35.34,40.20},{61.84,76.32},{85.96,128.15},{46.62,85.33}, {38.73,60.24},{22.65,26.64},{45.10,69.32},{10.81,55.39}, {72.16,113.00},{ 3.64,40.11},{44.44,93.04},{59.83,102.08}, {33.07,53.38},{88.30,114.97},{16.94,35.62},{58.77,91.32}, {58.84,80.83},{57.79,109.11},{82.11,99.22},{65.81,97.26}, {56.46,89.24},{98.54,138.29},{48.82,72.65},{11.74,38.78}, {89.73,122.59},{14.75,42.60},{85.57,122.56},{16.54,52.18}, {29.02,59.69},{37.02,62.00},{49.71,85.41},{17.62,39.28}, {35.96,73.54},{22.96,46.01},{28.48,74.16},{63.80,98.66}, {82.33,111.20},{12.18,47.98},{51.29,81.89},{86.83,120.17}, {38.94,75.34},{16.84,60.70},{21.13,56.89},{89.28,102.86}, {49.60,86.24},{96.19,148.10},{55.93,102.19},{93.10,118.80}, {66.87,106.87},{21.23,52.69},{19.15,38.20},{49.28,89.46}, {58.54,82.41},{ 8.61,20.99},{46.80,64.82},{21.45,39.17}, {98.06,162.09},{33.55,76.96},{21.72,47.03},{30.75,63.04}, {70.31,108.72},{11.38,36.57},{16.03,41.65},{89.58,132.84}, {82.09,118.12},{90.12,101.79},{62.86,88.28},{63.01,96.38}, {66.04,96.24},{23.56,53.42},{69.99,103.91},{ 2.29,38.48}, {23.46,55.17},{73.82,111.65},{63.50,106.29},{24.67,50.27}, {93.34,116.02},{49.77,83.52},{65.01,90.02},{74.38,119.88}, {98.05,137.44},{60.50,112.18},{17.75,46.40},{48.77,66.10}, { 9.87,36.71},{41.19,60.68},{11.91,42.76},{94.89,143.01}, {38.94,74.93},{18.64,38.54},{66.85,106.14},{46.23,83.12}, {14.12,55.73},{48.36,86.23},{37.79,68.52},{57.58,90.10}, {52.82,103.65},{22.80,54.35},{96.37,128.93},{36.01,71.90}, {56.61,87.72},{28.47,42.19},{97.85,132.03},{54.71,85.06}, {59.97,93.65},{12.80,33.33},{89.46,118.05},{67.74,99.10}, {72.96,110.21},{ 3.37,30.79},{61.08,87.34},{30.27,54.07}, {80.44,113.77},{61.79,107.35},{16.80,36.25},{36.11,87.10}, {23.47,34.65},{42.93,67.18},{59.81,100.74},{ 9.83,31.58}, {20.50,52.04},{69.02,98.83},{68.99,116.05},{86.70,98.10}, {18.14,55.01},{99.32,132.01},{93.04,116.40},{73.44,103.58}, {54.85,93.77},{30.30,73.03},{42.70,69.68},{28.74,47.53}, {36.70,58.24},{ 2.25,20.30},{31.58,69.69},{50.11,73.27}, {15.12,44.77},{94.15,137.43},{56.68,80.90},{96.46,129.14}, { 0.43,29.74},{71.62,117.72},{59.58,96.36},{84.18,129.18}, {41.01,83.94},{37.25,81.88},{52.97,76.90},{82.33,117.77}, {93.01,125.58},{19.82,42.13},{15.34,66.30},{10.41,32.21}, {15.46,27.21},{84.50,112.50},{95.28,139.45},{97.08,127.88}, {47.62,68.52},{22.22,39.32},{35.06,48.35},{98.97,149.30}, {31.27,50.78},{97.44,131.59},{15.17,56.41},{10.45,38.70}, {25.33,58.60},{ 2.11,30.71},{19.70,50.07},{76.22,116.69}, { 3.03, 8.54},{75.53,114.39},{ 7.84,23.87},{38.08,64.11}, {15.47,35.63},{50.02,93.93},{33.98,66.41},{79.23,114.48}, {68.81,96.68},{50.48,97.02},{24.41,57.24},{97.06,131.69}, {27.59,57.96},{ 7.43,36.04},{26.22,54.52},{51.16,91.41}, {55.65,98.61},{84.28,120.89},{42.51,67.14},{28.43,64.76}, { 1.04,56.55},{35.33,73.84},{27.15,59.22},{70.41,102.22}, {88.04,124.07},{19.41,29.02},{22.28,53.20},{88.19,122.10}, {50.02,106.03},{56.03,95.14},{31.24,51.95},{64.07,101.28}, {78.51,103.60},{32.47,68.50},{67.91,110.01},{67.66,93.38}, {63.05,105.49},{ 3.19,43.07},{57.24,101.65},{69.49,97.46}, {49.58,85.88},{94.65,130.13},{54.50,88.30},{43.62,89.43}, {57.97,83.41},{43.13,83.20},{21.00,44.82},{59.35,74.14}, {57.82,75.53},{77.34,97.47},{30.29,64.36},{49.88,84.15}, {41.55,81.68},{96.27,142.28},{54.18,64.01},{78.95,122.31}, {96.53,131.50},{96.33,117.12},{59.78,74.57},{25.86,55.66}, {93.50,136.69},{84.76,119.37},{73.14,106.46},{48.07,74.40}, {22.53,54.86},{ 6.39,42.39},{62.43,81.91},{45.44,67.17}, 
{76.81,116.23},{94.19,127.34},{31.03,55.92},{21.76,36.42}, {32.47,61.13},{70.47,85.93},{23.19,54.75},{81.57,122.35}, {96.74,134.52},{ 9.15,51.97},{89.90,118.11},{ 2.77,33.70}, { 3.36,29.82},{31.95,64.99},{11.11,25.57},{30.51,46.15}, {22.58,56.37},{60.04,86.98},{64.42,92.98},{ 4.02,28.30}, {52.93,105.09},{68.61,100.56},{97.57,140.89},{91.88,132.20}, { 8.89,35.30},{64.23,94.59},{93.45,139.06},{37.62,44.86}, {14.43,51.46},{32.21,84.10},{80.69,127.51},{33.19,73.49}, { 1.40,36.45},{76.65,107.98},{93.43,122.99},{88.91,121.39}, {81.95,120.81},{20.32,42.43},{56.95,87.28},{80.09,111.05}, {83.63,129.54},{75.02,109.92},{73.08,117.07},{35.44,71.25}, { 7.84,30.65},{33.31,72.21},{68.75,95.19},{41.02,77.76}, {69.90,102.93},{80.38,121.94},{77.02,117.53},{47.01,82.60}, {28.49,64.08},{73.36,117.31},{37.29,73.73},{28.05,57.95}, {71.58,118.36},{30.60,59.11},{ 1.13,14.84},{29.99,54.38}, {15.21,47.19},{14.12,21.18},{97.73,133.90},{69.66,95.75} }; double residual_error (double x , double y , double m , double c){ double e = (m*x) +c - y; return e * e; } __device__ double d_residual_error (double x , double y , double m , double c){ double e = (m*x) +c - y; return e*e; } double rms_error (double m , double c){ int i; double mean; double error_sum =0; for (i=0; i<n_data; i++){ error_sum += residual_error(data[i].x,data [i].y,m,c); } mean = error_sum / n_data; return sqrt (mean); } __global__ void d_rms_error (double *m , double *c, double *error_sum_arr, point_t *d_data){ int i = threadIdx.x + blockIdx.x * blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); cudaError_t error; //Device variables double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be = rms_error(bm, bc); error = cudaMalloc(&d_dm, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Allocate memory for d_dc error = cudaMalloc(&d_dc, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000)); if(error){ fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Allocate memory for d_data error = cudaMalloc(&d_data, sizeof(data)); if(error){ fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i] = bc + (oc[i] * step); } //Copy memory for dm to d_dm error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error, cudaGetErrorString(error)); } //Copy memory for dc to d_dc error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ 
fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error, cudaGetErrorString(error)); } //Copy memory for data to d_data error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error, cudaGetErrorString(error)); } for(i=0;i<8;i++) { //Host variable storing the array returned from the kernel function. double h_error_sum_arr[1000]; //Stores the total sum of the values from the error sum array. double error_sum_total; //Stores the mean of the total sum of the error sums. double error_sum_mean; //Call the rms_error function using 100 blocks and 10 threads. d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data); cudaThreadSynchronize(); //Copy memory for d_error_sum_arr error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost); if(error){ fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error, cudaGetErrorString(error)); } //Loop through the error sum array returned from the kernel function for(int j=0; j<n_data; j++) { //Add each error sum to the error sum total. error_sum_total += h_error_sum_arr[j]; } //Calculate the mean for the error sum. error_sum_mean = error_sum_total / n_data; //Calculate the square root for the error sum mean. e[i] = sqrt(error_sum_mean); if(e[i] < best_error) { best_error = e[i]; best_error_i = i; } //Reset the error sum total. error_sum_total = 0; } //printf("best m,c is %lf,%lf with error %lf in direction %d\n", //dm[best_error_i], dc[best_error_i], best_error, best_error_i); if(best_error < be) { be = best_error; bm = dm[best_error_i]; bc = dc[best_error_i]; } else { minimum_found = 1; } } //Free memory for d_dm error = cudaFree(d_dm); if(error){ fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_dc error = cudaFree(d_dc); if(error){ fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_data error = cudaFree(d_data); if(error){ fprintf(stderr, "cudaFree on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_error_sum_arr error = cudaFree(d_error_sum_arr); if(error){ fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); //Get the system time after we have run the linear regression function. clock_gettime(CLOCK_MONOTONIC, &finish); //Calculate the time spent between the start time and end time. time_difference(&start, &finish, &time_elapsed); //Output the time spent running the program. printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
d04ce2a65bf4fd6e2288b4d77b27bff04c4fa895.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <complex.h> // includes, project #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hipfft.h> #include <hip/hip_complex.h> #include <helper_functions.h> #include <helper_cuda.h> #include <timer.h> #include <hip/hip_runtime_api.h> // include parameters for DNS #include "dnsparams.h" #include "solver.h" #include "statistics.h" #include "cudafuncs.h" #include "iofuncs.h" #include "initialize.h" #include "fftfuncs.h" #include "struct_def.h" #include "declare.h" #include "allocate.h" void splitData(int numGPUs, gpuinfo *gpu) { int i, n; gpu->nGPUs = numGPUs; // Allocate pinned memory on the host that stores GPU info hipHostMalloc((void**)&gpu->gpunum, numGPUs*sizeof(gpu->gpunum), hipHostMallocMapped); hipHostMalloc((void**)&gpu->ny, numGPUs*sizeof(gpu->ny), hipHostMallocMapped); hipHostMalloc((void**)&gpu->nx, numGPUs*sizeof(gpu->nx), hipHostMallocMapped); hipHostMalloc((void**)&gpu->start_y, numGPUs*sizeof(gpu->start_y), hipHostMallocMapped); hipHostMalloc((void**)&gpu->start_x, numGPUs*sizeof(gpu->start_x), hipHostMallocMapped); // Add numGPUs to each GPU struct: for(i=0;i<numGPUs;++i){ gpu->gpunum[i] = i; } // Splitting data in x-direction if(NX % numGPUs == 0){ for (i=0; i<numGPUs; ++i){ gpu->nx[i] = NX/numGPUs; gpu->start_x[i] = i*gpu->nx[i]; } } else { printf("Warning: number of GPUs is not an even multiple of the data size\n"); n = NX/numGPUs; for(i=0; i<(numGPUs-1); ++i){ gpu->nx[i] = n; gpu->start_x[i] = i*gpu->nx[i]; } gpu->nx[numGPUs-1] = n + NX%numGPUs; gpu->start_x[numGPUs-1] = (numGPUs-1)*n; } // Now splitting data across y-direction if(NY % numGPUs == 0){ for (i=0; i<numGPUs; ++i){ gpu->ny[i] = NY/numGPUs; gpu->start_y[i] = i*gpu->ny[i]; } } else { printf("Warning: number of GPUs is not an even multiple of the data size\n"); n = NY/numGPUs; for(i=0; i<(numGPUs-1); ++i){ gpu->ny[i] = n; gpu->start_y[i] = i*gpu->ny[i]; } gpu->ny[numGPUs-1] = n + NY%numGPUs; gpu->start_y[numGPUs-1] = (numGPUs-1)*n; } return; } int main (void) { //===================================================================================================== // Program Start-up //===================================================================================================== // Set GPU's to use and list device properties int n, nGPUs; // Query number of devices attached to host // hipGetDeviceCount(&nGPUs); nGPUs=2; printf("Welcome to the GPU-based Navier-Stokes Solver! 
Configuration: \n" "Number of GPUs = %d \n " "Grid size = %dx%dx%d \n ",nGPUs,NX,NY,NZ); // List properties of each device displayDeviceProps(nGPUs); //===================================================================================================== // Allocate Memory //===================================================================================================== splitData(nGPUs, &gpu); // Variables declared in "declare.h" // Allocate memory for variables allocate_memory(); // Create plans for cuFFT on each GPU plan1dFFT(nGPUs, fft); plan2dFFT(gpu, fft); printf("FFT's successfully configured!\n"); // Declare variables int c = 0; int euler = 0; double time=0.0; double steptime=0.0; //======================================================================================================= // Initialize simulation //======================================================================================================= // printf("Starting Timer...\n"); // StartTimer(); // Setup wavespace domain initializeWaveNumbers(gpu, k); // Launch CUDA kernel to initialize velocity field //importVelocity(gpu, h_vel, vel); //importScalar(gpu, h_vel, vel); // initializeTaylorGreen(gpu,vel); initializeJet_Superposition(fft, gpu, k, h_vel, vel, rhs); // Does not require importData // initializeJet_Convolution(fft, gpu, h_vel, vel, rhs); // Does not require importData // Save Initial Data to file (t = 0) // Copy data to host save3Dfields(c, fft, gpu, h_vel, vel); save2Dfields(c, fft, gpu, h_vel, vel); synchronizeGPUs(nGPUs); // Transform velocity to fourier space for timestepping forwardTransform(fft, gpu, vel.u); forwardTransform(fft, gpu, vel.v); forwardTransform(fft, gpu, vel.w); forwardTransform(fft, gpu, vel.s); // Dealias the solution by truncating RHS deAlias(gpu, k, vel); // Calculate statistics at initial condition calcTurbStats_mgpu(0, gpu, fft, k, vel, rhs, stats, Yprofile); // Synchronize GPUs before entering timestepping loop synchronizeGPUs(nGPUs); // Print statistics to screen int stats_count = 0; printTurbStats(stats_count,0.0,stats[0]); // Using 0 index to send aggregate data collected in first index stats_count += 1; // Start iteration timer // StartTimer(); hipProfilerStart(); //================================================================================================== // Enter time-stepping loop //================================================================================================== for ( c = 1; c <= nt; ++c ){ // Start iteration timer StartTimer(); // Create flags to specify Euler timesteps if (c == 1){ euler = 1; } else{ euler = 0; } // Call pseudospectral Navier-Stokes solver solver_ps(euler, fft, gpu, vel, rhs, rhs_old, k, temp_advective); //============================================================================================== // Calculate bulk turbulence statistics and print to screen //============================================================================================== if(c % n_stats == 0){ calcTurbStats_mgpu(c, gpu, fft, k, vel, rhs, stats, Yprofile); // Get elapsed time from Timer steptime = GetTimer(); // Print statistics to screen printTurbStats(stats_count,steptime,stats[0]); // Using 0 index to send aggregate data collected in first index stats_count += 1; } if(c % n_vis == 0){ save2Dfields(c, fft, gpu, h_vel, vel); } // Synchronize GPUs before moving to next timestep synchronizeGPUs(nGPUs); // Save data to file every n_checkpoint timesteps if ( c % n_checkpoint == 0 ){ save3Dfields(c, fft, gpu, h_vel, vel); } 
//=============================================================================================== // End of Timestep //=============================================================================================== steptime = GetTimer(); time += steptime; if(c%n_stats!=0) printIterTime(c,steptime); } //================================================================================================ // End of time stepping loop - save final results and clean up workspace variables //================================================================================================ printf("Total time elapsed: %2.2fs\n", time/1000); // Synchronize devices hipSetDevice(0); hipDeviceSynchronize(); // Copy turbulent results from GPU to CPU memory // Make sure that the stats counter is equal to the number of data points being saved if(stats_count != nt/n_stats+1) printf("Error: Length of stats not equal to counter!!\n"); // Post-Simulation cleanup // Deallocate resources deallocate_memory(); // Reset all GPUs for(n = 0; n<nGPUs; ++n){ hipSetDevice(n); hipDeviceReset(); } hipProfilerStop(); return 0; }
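A small host-only illustration of the slab partitioning done by splitData() above for the uneven case: the first numGPUs-1 devices each get NX/numGPUs x-planes and the last device absorbs the remainder. print_x_split is a hypothetical demo helper, not part of the solver.

#include <stdio.h>

/* For example, print_x_split(10, 3) prints slabs of 3, 3 and 4 planes starting
 * at x = 0, 3 and 6, mirroring the nx[]/start_x[] values splitData() produces. */
static void print_x_split(int NX_demo, int numGPUs) {
  int n = NX_demo / numGPUs;
  for (int i = 0; i < numGPUs; ++i) {
    int nx    = (i == numGPUs - 1) ? n + NX_demo % numGPUs : n;
    int start = i * n;
    printf("GPU %d: nx = %d, start_x = %d\n", i, nx, start);
  }
}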
d04ce2a65bf4fd6e2288b4d77b27bff04c4fa895.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <complex.h> // includes, project #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cufft.h> #include <cuComplex.h> #include <helper_functions.h> #include <helper_cuda.h> #include <timer.h> #include <cuda_profiler_api.h> // include parameters for DNS #include "dnsparams.h" #include "solver.h" #include "statistics.h" #include "cudafuncs.h" #include "iofuncs.h" #include "initialize.h" #include "fftfuncs.h" #include "struct_def.h" #include "declare.h" #include "allocate.h" void splitData(int numGPUs, gpuinfo *gpu) { int i, n; gpu->nGPUs = numGPUs; // Allocate pinned memory on the host that stores GPU info cudaHostAlloc((void**)&gpu->gpunum, numGPUs*sizeof(gpu->gpunum), cudaHostAllocMapped); cudaHostAlloc((void**)&gpu->ny, numGPUs*sizeof(gpu->ny), cudaHostAllocMapped); cudaHostAlloc((void**)&gpu->nx, numGPUs*sizeof(gpu->nx), cudaHostAllocMapped); cudaHostAlloc((void**)&gpu->start_y, numGPUs*sizeof(gpu->start_y), cudaHostAllocMapped); cudaHostAlloc((void**)&gpu->start_x, numGPUs*sizeof(gpu->start_x), cudaHostAllocMapped); // Add numGPUs to each GPU struct: for(i=0;i<numGPUs;++i){ gpu->gpunum[i] = i; } // Splitting data in x-direction if(NX % numGPUs == 0){ for (i=0; i<numGPUs; ++i){ gpu->nx[i] = NX/numGPUs; gpu->start_x[i] = i*gpu->nx[i]; } } else { printf("Warning: number of GPUs is not an even multiple of the data size\n"); n = NX/numGPUs; for(i=0; i<(numGPUs-1); ++i){ gpu->nx[i] = n; gpu->start_x[i] = i*gpu->nx[i]; } gpu->nx[numGPUs-1] = n + NX%numGPUs; gpu->start_x[numGPUs-1] = (numGPUs-1)*n; } // Now splitting data across y-direction if(NY % numGPUs == 0){ for (i=0; i<numGPUs; ++i){ gpu->ny[i] = NY/numGPUs; gpu->start_y[i] = i*gpu->ny[i]; } } else { printf("Warning: number of GPUs is not an even multiple of the data size\n"); n = NY/numGPUs; for(i=0; i<(numGPUs-1); ++i){ gpu->ny[i] = n; gpu->start_y[i] = i*gpu->ny[i]; } gpu->ny[numGPUs-1] = n + NY%numGPUs; gpu->start_y[numGPUs-1] = (numGPUs-1)*n; } return; } int main (void) { //===================================================================================================== // Program Start-up //===================================================================================================== // Set GPU's to use and list device properties int n, nGPUs; // Query number of devices attached to host // cudaGetDeviceCount(&nGPUs); nGPUs=2; printf("Welcome to the GPU-based Navier-Stokes Solver! 
Configuration: \n" "Number of GPUs = %d \n " "Grid size = %dx%dx%d \n ",nGPUs,NX,NY,NZ); // List properties of each device displayDeviceProps(nGPUs); //===================================================================================================== // Allocate Memory //===================================================================================================== splitData(nGPUs, &gpu); // Variables declared in "declare.h" // Allocate memory for variables allocate_memory(); // Create plans for cuFFT on each GPU plan1dFFT(nGPUs, fft); plan2dFFT(gpu, fft); printf("FFT's successfully configured!\n"); // Declare variables int c = 0; int euler = 0; double time=0.0; double steptime=0.0; //======================================================================================================= // Initialize simulation //======================================================================================================= // printf("Starting Timer...\n"); // StartTimer(); // Setup wavespace domain initializeWaveNumbers(gpu, k); // Launch CUDA kernel to initialize velocity field //importVelocity(gpu, h_vel, vel); //importScalar(gpu, h_vel, vel); // initializeTaylorGreen(gpu,vel); initializeJet_Superposition(fft, gpu, k, h_vel, vel, rhs); // Does not require importData // initializeJet_Convolution(fft, gpu, h_vel, vel, rhs); // Does not require importData // Save Initial Data to file (t = 0) // Copy data to host save3Dfields(c, fft, gpu, h_vel, vel); save2Dfields(c, fft, gpu, h_vel, vel); synchronizeGPUs(nGPUs); // Transform velocity to fourier space for timestepping forwardTransform(fft, gpu, vel.u); forwardTransform(fft, gpu, vel.v); forwardTransform(fft, gpu, vel.w); forwardTransform(fft, gpu, vel.s); // Dealias the solution by truncating RHS deAlias(gpu, k, vel); // Calculate statistics at initial condition calcTurbStats_mgpu(0, gpu, fft, k, vel, rhs, stats, Yprofile); // Synchronize GPUs before entering timestepping loop synchronizeGPUs(nGPUs); // Print statistics to screen int stats_count = 0; printTurbStats(stats_count,0.0,stats[0]); // Using 0 index to send aggregate data collected in first index stats_count += 1; // Start iteration timer // StartTimer(); cudaProfilerStart(); //================================================================================================== // Enter time-stepping loop //================================================================================================== for ( c = 1; c <= nt; ++c ){ // Start iteration timer StartTimer(); // Create flags to specify Euler timesteps if (c == 1){ euler = 1; } else{ euler = 0; } // Call pseudospectral Navier-Stokes solver solver_ps(euler, fft, gpu, vel, rhs, rhs_old, k, temp_advective); //============================================================================================== // Calculate bulk turbulence statistics and print to screen //============================================================================================== if(c % n_stats == 0){ calcTurbStats_mgpu(c, gpu, fft, k, vel, rhs, stats, Yprofile); // Get elapsed time from Timer steptime = GetTimer(); // Print statistics to screen printTurbStats(stats_count,steptime,stats[0]); // Using 0 index to send aggregate data collected in first index stats_count += 1; } if(c % n_vis == 0){ save2Dfields(c, fft, gpu, h_vel, vel); } // Synchronize GPUs before moving to next timestep synchronizeGPUs(nGPUs); // Save data to file every n_checkpoint timesteps if ( c % n_checkpoint == 0 ){ save3Dfields(c, fft, gpu, h_vel, vel); } 
//=============================================================================================== // End of Timestep //=============================================================================================== steptime = GetTimer(); time += steptime; if(c%n_stats!=0) printIterTime(c,steptime); } //================================================================================================ // End of time stepping loop - save final results and clean up workspace variables //================================================================================================ printf("Total time elapsed: %2.2fs\n", time/1000); // Synchronize devices cudaSetDevice(0); cudaDeviceSynchronize(); // Copy turbulent results from GPU to CPU memory // Make sure that the stats counter is equal to the number of data points being saved if(stats_count != nt/n_stats+1) printf("Error: Length of stats not equal to counter!!\n"); // Post-Simulation cleanup // Deallocate resources deallocate_memory(); // Reset all GPUs for(n = 0; n<nGPUs; ++n){ cudaSetDevice(n); cudaDeviceReset(); } cudaProfilerStop(); return 0; }
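The solver above calls synchronizeGPUs(nGPUs) between stages; its definition lives in one of the included project headers and is not shown here. The sketch below is only an assumption of what such a helper typically looks like (select each device in turn and wait for its queued work), not the project's actual implementation.

#include <cuda_runtime.h>

static void synchronizeGPUs_sketch(int nGPUs) {   // hypothetical stand-in
  for (int n = 0; n < nGPUs; ++n) {
    cudaSetDevice(n);           // make device n current
    cudaDeviceSynchronize();    // block until all work queued on it has finished
  }
}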
ec0252e901ed4419d020578ff37802b7980a1286.hip
// !!! This is a file automatically generated by hipify!!! //Example 2. Application Using C and CUBLAS: 0-based indexing //----------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include "rocblas.h" #include "mex.h" #define M 6 #define N 5 #define IDX2C(i,j,ld) (((j)*(ld))+(i)) static __inline__ void modify (hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){ hipblasSscal (handle, n-p, &alpha, &m[IDX2C(p,q,ldm)], ldm); hipblasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1); } /* * * This function scales the vector x by the scalar and overwrites it with the result. * Hence, the performed operation is x [ j ] = x [ j ] for i = 1 , , n and j = 1 + ( i - 1 ) * incx . hipblasStatus_t hipblasSscal(hipblasHandle_t handle, int n, const float *alpha, float *x, int incx) handle: handle to the cuBLAS library context. (input) n: number of elements in the vector x (input) alpha: <type> scalar used for multiplication. (input) x: <type> vector with n elements. (in/out) incx: stride between consecutive elements of x. (input) * */ int test (){ hipError_t cudaStat; hipblasStatus_t stat; hipblasHandle_t handle; int i, j; float* devPtrA; float* a = 0; a = (float *)malloc (M * N * sizeof (*a)); if (!a) { mexPrintf ("host memory allocation failed"); return EXIT_FAILURE; } for (j = 0; j < N; j++) { for (i = 0; i < M; i++) { a[IDX2C(i,j,M)] = (float)(i * M + j + 1); } } cudaStat = hipMalloc ((void**)&devPtrA, M*N*sizeof(*a)); if (cudaStat != hipSuccess) { mexPrintf ("device memory allocation failed"); return EXIT_FAILURE; } stat = hipblasCreate(&handle); if (stat != HIPBLAS_STATUS_SUCCESS) { mexPrintf ("CUBLAS initialization failed\n"); return EXIT_FAILURE; } stat = hipblasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M); /* * hipblasStatus_t hipblasSetMatrix(int rows, int cols, int elemSize, const void *A, int lda, void *B, int ldb) This function copies a tile of rows x cols elements from a matrix A in host memory space to a matrix B in GPU memory space. It is assumed that each element requires storage of elemSize bytes and that both matrices are stored in column-major format, with the leading dimension of the source matrix A and destination matrix B given in lda and ldb, respectively. The leading dimension indicates the number of rows of the allocated matrix, even if only a submatrix of it is being used. In general, B is a device pointer that points to an object, or part of an object, that was allocated in GPU memory space via hipblasAlloc(). * */ if (stat != HIPBLAS_STATUS_SUCCESS) { mexPrintf ("data download failed"); hipFree (devPtrA); hipblasDestroy(handle); return EXIT_FAILURE; } modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f); //inline method stat = hipblasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M); /* * This function copies a tile of rows x cols elements from a matrix A in GPU memory space to a matrix B in host memory space. * It is assumed that each element requires storage of elemSize bytes and that both matrices are stored in column-major format, * with the leading dimension of the source matrix A and destination matrix B given in lda and ldb, respectively. * The leading dimension indicates the number of rows of the allocated matrix, even if only a submatrix of it is being used. * In general, A is a device pointer that points to an object, or part of an object, that was allocated in GPU memory space via hipblasAlloc(). 
hipblasStatus_t hipblasGetMatrix(int rows, int cols, int elemSize, const void *A, int lda, void *B, int ldb) * */ if (stat != HIPBLAS_STATUS_SUCCESS) { mexPrintf ("data upload failed"); hipFree (devPtrA); hipblasDestroy(handle); return EXIT_FAILURE; } hipFree (devPtrA); hipblasDestroy(handle); for (j = 0; j < N; j++) { for (i = 0; i < M; i++) { mexPrintf ("%7.0f", a[IDX2C(i,j,M)]); } mexPrintf ("\n"); } free(a); return EXIT_SUCCESS; } /*================================================================= * mexfunction.c * * This example demonstrates how to use mexFunction. It returns * the number of elements for each input argument, providing the * function is called with the same number of output arguments * as input arguments. * This is a MEX-file for MATLAB. * Copyright 1984-2011 The MathWorks, Inc. * All rights reserved. *=================================================================*/ #include "mex.h" void mexFunction(int nlhs,mxArray *plhs[],int nrhs,const mxArray *prhs[]) { int i; /* Examine input (right-hand-side) arguments. */ mexPrintf("\nThere are %d right-hand-side argument(s).", nrhs); for (i=0; i<nrhs; i++) { mexPrintf("\n\tInput Arg %i is of type:\t%s ",i,mxGetClassName(prhs[i])); } /* Examine output (left-hand-side) arguments. */ mexPrintf("\n\nThere are %d left-hand-side argument(s).\n", nlhs); if (nlhs > nrhs) mexErrMsgIdAndTxt( "MATLAB:mexfunction:inputOutputMismatch", "Cannot specify more outputs than inputs.\n"); for (i=0; i<nlhs; i++) { plhs[i]=mxCreateDoubleMatrix(1,1,mxREAL); *mxGetPr(plhs[i])=(double)mxGetNumberOfElements(prhs[i]); } test(); }
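A host-only trace of what modify() above does to the 6x5 column-major matrix: the first Sscal call walks along row p with stride ldm and scales n-p elements by alpha, the second walks down column q with stride 1 and scales ldm-p elements by beta, so the element at (p,q) is scaled by alpha*beta. Note that with the arguments used in test() (p=1, q=2, n=N=5) the first call covers columns 2..5, one column past the last column of the matrix; scaling n-q elements would stay in range. cpu_sscal and cpu_modify are hypothetical helpers for this illustration only and assume the IDX2C macro defined above.

static void cpu_sscal(int n, float alpha, float *x, int incx) {
  for (int k = 0; k < n; ++k)
    x[k * incx] *= alpha;                 /* same element pattern as hipblasSscal */
}

static void cpu_modify(float *m, int ldm, int n, int p, int q,
                       float alpha, float beta) {
  cpu_sscal(n - p,   alpha, &m[IDX2C(p, q, ldm)], ldm); /* along row p    */
  cpu_sscal(ldm - p, beta,  &m[IDX2C(p, q, ldm)], 1);   /* down column q  */
}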
ec0252e901ed4419d020578ff37802b7980a1286.cu
//Example 2. Application Using C and CUBLAS: 0-based indexing //----------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include "cublas_v2.h" #include "mex.h" #define M 6 #define N 5 #define IDX2C(i,j,ld) (((j)*(ld))+(i)) static __inline__ void modify (cublasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){ cublasSscal (handle, n-p, &alpha, &m[IDX2C(p,q,ldm)], ldm); cublasSscal (handle, ldm-p, &beta, &m[IDX2C(p,q,ldm)], 1); } /* * * This function scales the vector x by the scalar α and overwrites it with the result. * Hence, the performed operation is x [ j ] = α × x [ j ] for i = 1 , … , n and j = 1 + ( i - 1 ) * incx . cublasStatus_t cublasSscal(cublasHandle_t handle, int n, const float *alpha, float *x, int incx) handle: handle to the cuBLAS library context. (input) n: number of elements in the vector x (input) alpha: <type> scalar used for multiplication. (input) x: <type> vector with n elements. (in/out) incx: stride between consecutive elements of x. (input) * */ int test (){ cudaError_t cudaStat; cublasStatus_t stat; cublasHandle_t handle; int i, j; float* devPtrA; float* a = 0; a = (float *)malloc (M * N * sizeof (*a)); if (!a) { mexPrintf ("host memory allocation failed"); return EXIT_FAILURE; } for (j = 0; j < N; j++) { for (i = 0; i < M; i++) { a[IDX2C(i,j,M)] = (float)(i * M + j + 1); } } cudaStat = cudaMalloc ((void**)&devPtrA, M*N*sizeof(*a)); if (cudaStat != cudaSuccess) { mexPrintf ("device memory allocation failed"); return EXIT_FAILURE; } stat = cublasCreate(&handle); if (stat != CUBLAS_STATUS_SUCCESS) { mexPrintf ("CUBLAS initialization failed\n"); return EXIT_FAILURE; } stat = cublasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M); /* * cublasStatus_t cublasSetMatrix(int rows, int cols, int elemSize, const void *A, int lda, void *B, int ldb) This function copies a tile of rows x cols elements from a matrix A in host memory space to a matrix B in GPU memory space. It is assumed that each element requires storage of elemSize bytes and that both matrices are stored in column-major format, with the leading dimension of the source matrix A and destination matrix B given in lda and ldb, respectively. The leading dimension indicates the number of rows of the allocated matrix, even if only a submatrix of it is being used. In general, B is a device pointer that points to an object, or part of an object, that was allocated in GPU memory space via cublasAlloc(). * */ if (stat != CUBLAS_STATUS_SUCCESS) { mexPrintf ("data download failed"); cudaFree (devPtrA); cublasDestroy(handle); return EXIT_FAILURE; } modify (handle, devPtrA, M, N, 1, 2, 16.0f, 12.0f); //inline method stat = cublasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M); /* * This function copies a tile of rows x cols elements from a matrix A in GPU memory space to a matrix B in host memory space. * It is assumed that each element requires storage of elemSize bytes and that both matrices are stored in column-major format, * with the leading dimension of the source matrix A and destination matrix B given in lda and ldb, respectively. * The leading dimension indicates the number of rows of the allocated matrix, even if only a submatrix of it is being used. * In general, A is a device pointer that points to an object, or part of an object, that was allocated in GPU memory space via cublasAlloc(). 
cublasStatus_t cublasGetMatrix(int rows, int cols, int elemSize, const void *A, int lda, void *B, int ldb) * */ if (stat != CUBLAS_STATUS_SUCCESS) { mexPrintf ("data upload failed"); cudaFree (devPtrA); cublasDestroy(handle); return EXIT_FAILURE; } cudaFree (devPtrA); cublasDestroy(handle); for (j = 0; j < N; j++) { for (i = 0; i < M; i++) { mexPrintf ("%7.0f", a[IDX2C(i,j,M)]); } mexPrintf ("\n"); } free(a); return EXIT_SUCCESS; } /*================================================================= * mexfunction.c * * This example demonstrates how to use mexFunction. It returns * the number of elements for each input argument, providing the * function is called with the same number of output arguments * as input arguments. * This is a MEX-file for MATLAB. * Copyright 1984-2011 The MathWorks, Inc. * All rights reserved. *=================================================================*/ #include "mex.h" void mexFunction(int nlhs,mxArray *plhs[],int nrhs,const mxArray *prhs[]) { int i; /* Examine input (right-hand-side) arguments. */ mexPrintf("\nThere are %d right-hand-side argument(s).", nrhs); for (i=0; i<nrhs; i++) { mexPrintf("\n\tInput Arg %i is of type:\t%s ",i,mxGetClassName(prhs[i])); } /* Examine output (left-hand-side) arguments. */ mexPrintf("\n\nThere are %d left-hand-side argument(s).\n", nlhs); if (nlhs > nrhs) mexErrMsgIdAndTxt( "MATLAB:mexfunction:inputOutputMismatch", "Cannot specify more outputs than inputs.\n"); for (i=0; i<nlhs; i++) { plhs[i]=mxCreateDoubleMatrix(1,1,mxREAL); *mxGetPr(plhs[i])=(double)mxGetNumberOfElements(prhs[i]); } test(); }
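The comment blocks embedded in the entry above quote the cuBLAS reference for cublasSscal, cublasSetMatrix and cublasGetMatrix. As a minimal, self-contained sketch of the same upload / scale / download round trip outside the MEX wrapper (the vector length, scale factor and file layout here are illustrative, not taken from the dataset entry):

// Minimal cublasSscal round trip: upload a vector, scale it by alpha on the
// device, copy it back.  Error checking is trimmed to keep the sketch short.
#include <stdio.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"

int main(void) {
    const int n = 8;
    const float alpha = 2.0f;                      // illustrative scale factor
    float x[n];
    for (int i = 0; i < n; ++i) x[i] = (float)i;

    float *d_x = NULL;
    cudaMalloc((void**)&d_x, n * sizeof(float));

    cublasHandle_t handle;
    cublasCreate(&handle);

    cublasSetVector(n, sizeof(float), x, 1, d_x, 1);   // host -> device
    cublasSscal(handle, n, &alpha, d_x, 1);            // x[i] = alpha * x[i]
    cublasGetVector(n, sizeof(float), d_x, 1, x, 1);   // device -> host

    for (int i = 0; i < n; ++i) printf("%4.1f ", x[i]);
    printf("\n");

    cublasDestroy(handle);
    cudaFree(d_x);
    return 0;
}

Under hipify these calls map one-for-one onto hipblasCreate, hipblasSetVector, hipblasSscal, hipblasGetVector and hipblasDestroy, matching the hipBLAS spelling that the matrix-based entry above uses for hipblasSetMatrix and hipblasGetMatrix.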
9bfd3fa66f0ddcba9d35e2a61916abd83cb1e3ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "vec3.h" #include "ray.h" #include "sphere.h" #include "hitable_list.h" #include "camera.h" #include "bvh.h" #include "texture.h" #define STB_IMAGE_IMPLEMENTATION #include "libs/stb/stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "libs/stb/stb_image_write.h" #define SAMPLES_PER_PIXEL 100 #define SCENE_BALLS //#define SCENE_HDR // remember, the # converts the definition to a char* #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__) inline void check_cuda(hipError_t errcode, char const* const func, const char* const file, int const line) { if (errcode) { fprintf(stderr, "check_cuda error (%d):\nFile \"%s\", line %d\n%s\n", static_cast<unsigned int>(errcode), file, line, hipGetErrorString(errcode)); hipDeviceReset(); exit(99); } } //texture<float, 2, hipReadModeElementType> tex; __device__ vec3 color(const ray& r, hitable_list** scene, hiprandState_t* rstate) { // this section is a simple implementation for a diffuse material with a 50% // attenuation at each bounce ray curr_r = r; vec3 curr_attenuation(1.f, .8f, .7f); //vec3 curr_attenuation(0.067, 0.471, 0.576); for (int i = 0; i < RAY_BOUNCES; ++i) { hit_record hrec; // 0.001 -> ignore hits near zero if ((*scene)->hit(curr_r, 0.00001f, FLT_MAX, hrec)) { ray scattered; vec3 attenuation; vec3 emit = hrec.m()->emit(hrec) + vec3(0.1,0.1,0.1); // bloomy effect if (hrec.m()->scatter(curr_r, scattered, hrec, attenuation, rstate)) { curr_attenuation = emit + attenuation*curr_attenuation; curr_r = scattered; } else { return emit; } /*vec3 target = hrec.p() + hrec.n() + random_point_unit_sphere(rstate); curr_attenuation *= 0.5f; curr_r = ray(hrec.p(), target - hrec.p());*/ } else { /*vec3 unit_direction = vec3::normalize(curr_r.direction()); float t = 0.5f * (unit_direction.y() + 1.0f); vec3 v = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0); return curr_attenuation * v; */ // return world color return curr_attenuation; } } return vec3(); // exceeded recursion /*if ((col.r() < 0) || (col.g() < 0) || (col.b() < 0)) { printf("ERROR: COL=%f,%f,%f\n", col.r(), col.g(), col.b()); }*/ } __global__ void init_rand_state(hiprandState_t* randState, int width, int height) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; // if out of range if ((i >= width) || (j >= height)) { return; } int index = utils::XY(i, j); // same seed for every thread, very slow //hiprand_init(SEED, index, 0, &randState[index]); // different seed for each thread, fast hiprand_init(SEED + index, 0, 0, &randState[index]); // produces weird artifacts //hiprand_init(SEED, 0, 0, &randState[index]); } __global__ void render(vec3* frameBuffer, int width, int height, hitable_list** scene, camera** cam, hiprandState_t* randState) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; // if out of range if ((i >= width) || (j >= height)) { return; } int index = utils::XY(i, j); hiprandState_t rstate = randState[index]; vec3 col; for (uint16_t sample = 0; sample < SAMPLES_PER_PIXEL; ++sample) { // remember: random value is [0, 1[ float u = float(i + hiprand_uniform(&rstate)) / float(width); float v = float(j + hiprand_uniform(&rstate)) / float(height); ray r = (*cam)->get_ray(u, v, &rstate); col += color(r, scene, &rstate); } col /= float(SAMPLES_PER_PIXEL); //col.saturate(); // do gamma correction with gamma 2 => raise the color to the power of 1/2 
(sqrt) frameBuffer[index] = col.saturate().gamma_correct(); // only for debug //frameBuffer[index] = col.gamma_correct(); } #ifdef SCENE_HDR constexpr char imagePath[] = "textures/hdr.jpg"; __global__ void populate_scene_hdr(hitable_object** objects, hitable_list** scene, camera** cam, hiprandState_t* state, float* textureBuffer) { if (threadIdx.x == 0 && blockIdx.x == 0) { objects[0] = new sphere( vec3(1., 0, -1), 1, //new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) new metal(vec3(0.8, 0.2, 0.5), 0.05) ); objects[0]->set_id(0); text* hdr_texture = new image_texture(textureBuffer, WIDTH*2, HEIGHT*2); //sphere 2 objects[1] = new sphere( vec3(0, 0, 0), 10, new emitter(hdr_texture) ); objects[1]->set_id(1); objects[2] = new sphere( vec3(-1., 0, -1), 1, new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) ); objects[2]->set_id(2); *scene = new hitable_list(objects, nullptr, 3); scene[0]->set_id(3); vec3 lookfrom = vec3(-1, 2, 9); vec3 lookat = vec3(0, 0, -1); float dist_to_focus = (lookfrom - lookat).length(); float aperture = .25f; *cam = new camera( lookfrom, // lookfrom lookat, // lookat vec3(0, 1, 0), // up 20.f, // fov float(WIDTH) / float(HEIGHT), aperture, dist_to_focus, 0, 0.2 ); } } #endif // TODO: check for array boundary #ifdef SCENE_BALLS constexpr char imagePath[] = "textures/earth.jpg"; __global__ void populate_scene_balls(hitable_object** objects, hitable_list** scene, camera** cam, hiprandState_t* state, float* textureBuffer) { if (threadIdx.x == 0 && blockIdx.x == 0) { // only call once // sphere 1 objects[0] = new sphere( vec3(0, 0, -1), 0.5, new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) //new dielectric(1.3, vec3(1, 1, 1)) //new dielectric(1.5, vec3(1,1,1)) ); objects[0]->set_id(0); // sphere 2 /*text* checker = new checker_texture( new constant_texture(vec3(0.1, 0.2, 0.5)), new constant_texture(vec3(0.5, 0.2, 0.1)));*/ //text* noise1 = new noise_texture(noise_type::TURBULANCE, .1f); text* noise1 = new noise_texture(noise_type::MARBLE, 1.f); /*text* noise = new wood_texture(vec3(0.792, 0.643, 0.447), //vec3(0.267, 0.188, 0.133), vec3(0.412, 0.349, 0.306), 10.f);*/ /*text* checker = new checker_texture( noise, noise);*/ //text* noise1 = new noise_texture(noise_type::MARBLE, .2f); objects[1] = new sphere( vec3(0, -1000.5, 1), 1000, //10, new lambertian(noise1) //new lambertian(new constant_texture(vec3(0.1, 0.2, 0.5))) ); objects[1]->set_id(1); /*objects[1] = new sphere( vec3(0, -20, 1), 10, new lambertian(vec3(0.1, 0.2, 0.5)) ); objects[1]->set_id(1);*/ text* im_text = new image_texture(textureBuffer, WIDTH, HEIGHT); //sphere 3 objects[2] = new sphere( vec3(1, 0, -1), 0.5, //new dielectric(1.5) //new lambertian(noise1) //new lambertian(new constant_texture(vec3(0.1, 0.2, 0.5))) new emitter(im_text,2), //new metal(vec3(1.f), 0.f) //new metal(vec3(1.f), 0.f) //new metal(vec3(0.075, 0.461, 0.559), 0.1f) true); objects[2]->set_id(2); //sphere 4 //perlin_noise::init(state); //perlin_noise noise; //text* per_text = new noise_texture(state); objects[3] = new sphere( vec3(-1, 0, -2), 0.5, //new lambertian(per_text) new metal(vec3(1.f), 0.f) // new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) //new dielectric(1.5, vec3(1, 1, 1)) //new metal(vec3(0.8, 0.8, 0.8), 0.5) ); objects[3]->set_id(3); //sphere 5 objects[4] = new sphere( vec3(0, 0, -2), 0.5, new metal(vec3(0.8, 0.8, 0.8), 0.5) //new metal(vec3(0.8, 0.8, 0.8), 0.5) ); objects[4]->set_id(4); objects[5] = new sphere( vec3(1, 0, -2), 0.5, //new emitter(vec3(1,0.5,0.5)) new dielectric(1.5, vec3(1, 
1, 1)) //new lambertian(new constant_texture(vec3(0.1, 0.2, 0.5))) //new lambertian(vec3(0.2, 0.9, 0.3)*0.6) ); objects[5]->set_id(5); objects[6]= new sphere( vec3(-1, 0, -1), 0.5, new emitter(new constant_texture(vec3(0.5,1,0.5))) //new dielectric(1.1, vec3(0.8,1.0,0.8)) ); objects[6]->set_id(6); objects[7] = new moving_sphere( vec3(-1, 1, -1), vec3(-2, 1, -1), 0.f, 1.f, 0.2, new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) //new dielectric(1.5, vec3(1, 1, 1)) //new metal(vec3(0.8, 0.8, 0.8), 0.5) ); /*objects[7] = new sphere( vec3(-1, 1, -1), 0.5, new lambertian(vec3(0.6, 0.1, 0.1)));*/ objects[7]->set_id(7); objects[8] = new bvh_node(objects, 8, 0, 1, state, 0); objects[8]->set_id(8); // check bvh hierarchy //bvh_node::display_tree(static_cast<bvh_node*>(objects[8]), 2); *scene = new hitable_list(objects, static_cast<bvh_node*>(objects[8]), 8); scene[0]->set_id(9); //for (int i = 0; i < 9; ++i) { // printf("(%d) %s\n", objects[i]->get_id(), hitable_object::obj_type_str(objects[i]->get_object_type())); //} //vec3 lookfrom = vec3(-2, 1, 2) * 2; vec3 lookfrom = vec3(-1, 1,5); // revert to 2 //THISvec3 lookfrom = vec3(5, 2, 3); //vec3 lookat = vec3(0, 0, -1); //vec3 lookat = vec3(-1, 0, -1); // redball //vec3 lookat = vec3(1, 0, -1); // marble ball vec3 lookat = vec3(0, 0, -1); float dist_to_focus = (lookfrom - lookat).length(); float aperture = .25f; *cam = new camera( lookfrom, // lookfrom lookat, // lookat vec3(0,1,0), // up 20.f, // fov float(WIDTH) / float(HEIGHT), aperture, dist_to_focus, 0, 0.2 ); //hit_record hrec; //ray r = (*cam)->get_ray(0.54, 0.5, state); //static_cast<bvh_node*>(objects[8])->dfs(r, 0.001f, FLT_MAX, hrec); //assert(0); } } #endif __global__ void free_scene(hitable_object** objects, hitable_list** scene, camera** cam) { // Objects already destoryed inside scene //delete* (objects); //delete* (objects + 1); delete* scene; delete* cam; } int main(int argc, char** argv) { // loading image to host // load image as uint8_t //uint8_t* imgData = stbi_load(imagePath, &w, &h, &ch, 0); //stbi_write_png("export.png", w, h, ch, imgData, w * ch); // load image as float int w, h, ch; stbi_ldr_to_hdr_scale(1.0f); stbi_ldr_to_hdr_gamma(1.0f); float* imgData_h = stbi_loadf(imagePath, &w, &h, &ch, 0); std::cout << "Loaded image with " << w << "x" << h << " and " << ch << " channels\n"; float* imgData_d; size_t imgSize = w * h * ch * sizeof(float); // TODO: for now, store texture in global memory. 
In the future, use texture checkCudaErrors(hipMalloc((float**)&imgData_d, imgSize)); checkCudaErrors(hipMemcpy(imgData_d, imgData_h, imgSize, hipMemcpyHostToDevice)); stbi_image_free(imgData_h); //stbi_write_png("export2.png", w, h, ch, imgData, w * ch); //stbi_image_free(imgData); std::cout << "Rendering a " << WIDTH << "x" << HEIGHT << " image "; std::cout << "(" << SAMPLES_PER_PIXEL << " samples per pixel) "; std::cout << "in " << THREAD_SIZE_X << "x" << THREAD_SIZE_Y << " blocks.\n"; // _d stands for device hitable_object** hitableObjects_d; hitable_list** scene_d; camera** camera_d; // random state hiprandState_t* rand_state_d; checkCudaErrors(hipMalloc((void**)&rand_state_d, WIDTH * HEIGHT * sizeof(hiprandState_t))); // allocate unified memory that holds the size of our image vec3* frameBuffer_u; // u stands for unified size_t frameBufferSize = WIDTH * HEIGHT * sizeof(vec3); // RGB values for each pixel checkCudaErrors(hipMallocManaged((void**)&frameBuffer_u, frameBufferSize)); // allocate device memory #ifdef SCENE_BALLS checkCudaErrors(hipMalloc((void**)&hitableObjects_d, 9 * sizeof(hitable_object*))); #endif #ifdef SCENE_HDR checkCudaErrors(hipMalloc((void**)&hitableObjects_d, 3 * sizeof(hitable_object*))); #endif checkCudaErrors(hipMalloc((void**)&scene_d, sizeof(hitable_list*))); checkCudaErrors(hipMalloc((void**)&camera_d, sizeof(camera*))); // remember, construction is done in 1 block, 1 thread #ifdef SCENE_BALLS hipLaunchKernelGGL(( populate_scene_balls), dim3(1), dim3(1), 0, 0, hitableObjects_d, scene_d, camera_d, rand_state_d, imgData_d); #endif #ifdef SCENE_HDR hipLaunchKernelGGL(( populate_scene_hdr) , dim3(1), dim3(1) , 0, 0, hitableObjects_d, scene_d, camera_d, rand_state_d, imgData_d); #endif checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); auto start = std::chrono::system_clock::now(); // remember: always round with + 1 dim3 blocks(WIDTH / THREAD_SIZE_X + 1, HEIGHT / THREAD_SIZE_Y + 1); dim3 threads(THREAD_SIZE_X, THREAD_SIZE_Y); // init rand state for each pixel hipLaunchKernelGGL(( init_rand_state), dim3(blocks),dim3(threads), 0, 0, rand_state_d, WIDTH, HEIGHT); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, frameBuffer_u, WIDTH, HEIGHT, scene_d, camera_d, rand_state_d); checkCudaErrors(hipGetLastError()); // block host until all device threads finish checkCudaErrors(hipDeviceSynchronize()); auto end = std::chrono::system_clock::now(); auto timer_seconds = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count(); std::cout << "took " << timer_seconds << "us.\n"; // Output frame buffer as a ppm image #if 0 std::ofstream ppm_image("render.ppm"); ppm_image << "P3\n" << WIDTH << " " << HEIGHT << "\n255\n"; for (int j = HEIGHT - 1; j >= 0; j--) { for (int i = 0; i < WIDTH; i++) { size_t index = utils::XY(i, j); float r = frameBuffer_u[index].r(); float g = frameBuffer_u[index].g(); float b = frameBuffer_u[index].b(); int ir = int(255.99 * r); int ig = int(255.99 * g); int ib = int(255.99 * b); ppm_image << ir << " " << ig << " " << ib << "\n"; } } ppm_image.close(); #endif uint8_t* imgBuff = (uint8_t*)std::malloc(WIDTH * HEIGHT * 3 * sizeof(uint8_t)); for (int j = HEIGHT - 1; j >= 0; --j) { for (int i = 0; i < WIDTH; ++i) { size_t index = utils::XY(i, j); // stbi generates a Y flipped image size_t rev_index = utils::XY(i, HEIGHT - j - 1); float r = frameBuffer_u[index].r(); float g = frameBuffer_u[index].g(); float b = 
frameBuffer_u[index].b(); imgBuff[rev_index * 3 + 0] = int(255.999f * r) & 255; imgBuff[rev_index * 3 + 1] = int(255.999f * g) & 255; imgBuff[rev_index * 3 + 2] = int(255.999f * b) & 255; } } //stbi_write_png("render.png", WIDTH, HEIGHT, 3, imgBuff, WIDTH * 3); stbi_write_jpg("render.jpg", WIDTH, HEIGHT, 3, imgBuff, 100); std::free(imgBuff); // clean everything checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( free_scene), dim3(1), dim3(1), 0, 0, hitableObjects_d, scene_d, camera_d); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipFree(hitableObjects_d)); checkCudaErrors(hipFree(scene_d)); checkCudaErrors(hipFree(camera_d)); checkCudaErrors(hipFree(rand_state_d)); checkCudaErrors(hipFree(frameBuffer_u)); checkCudaErrors(hipFree(imgData_d)); // Documentation: Destroy all allocations and reset all state on the // current device in the current process checkCudaErrors(hipDeviceReset()); return 0; }
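The render kernel in the entry above clamps each colour channel and applies gamma-2 correction (raising each channel to the power 1/2) before the result is packed into bytes. A stand-alone sketch of that step, using a plain float3 instead of the project's vec3 class (the helper name is illustrative):

#include <cuda_runtime.h>
#include <math.h>

// Clamp each channel to [0,1] and apply gamma-2 correction (out = sqrt(in)).
// Mirrors the saturate().gamma_correct() chain in the render kernel, but on a
// plain float3 rather than the project's vec3 type.
__host__ __device__ inline float3 saturate_gamma2(float3 c) {
    float r = fminf(fmaxf(c.x, 0.0f), 1.0f);
    float g = fminf(fmaxf(c.y, 0.0f), 1.0f);
    float b = fminf(fmaxf(c.z, 0.0f), 1.0f);
    return make_float3(sqrtf(r), sqrtf(g), sqrtf(b));
}

The byte conversion then proceeds exactly as in the imgBuff loop above, int(255.999f * value) per channel.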
9bfd3fa66f0ddcba9d35e2a61916abd83cb1e3ec.cu
#include "common.h" #include "vec3.h" #include "ray.h" #include "sphere.h" #include "hitable_list.h" #include "camera.h" #include "bvh.h" #include "texture.h" #define STB_IMAGE_IMPLEMENTATION #include "libs/stb/stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "libs/stb/stb_image_write.h" #define SAMPLES_PER_PIXEL 100 #define SCENE_BALLS //#define SCENE_HDR // remember, the # converts the definition to a char* #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__) inline void check_cuda(cudaError_t errcode, char const* const func, const char* const file, int const line) { if (errcode) { fprintf(stderr, "check_cuda error (%d):\nFile \"%s\", line %d\n%s\n", static_cast<unsigned int>(errcode), file, line, cudaGetErrorString(errcode)); cudaDeviceReset(); exit(99); } } //texture<float, 2, cudaReadModeElementType> tex; __device__ vec3 color(const ray& r, hitable_list** scene, curandState* rstate) { // this section is a simple implementation for a diffuse material with a 50% // attenuation at each bounce ray curr_r = r; vec3 curr_attenuation(1.f, .8f, .7f); //vec3 curr_attenuation(0.067, 0.471, 0.576); for (int i = 0; i < RAY_BOUNCES; ++i) { hit_record hrec; // 0.001 -> ignore hits near zero if ((*scene)->hit(curr_r, 0.00001f, FLT_MAX, hrec)) { ray scattered; vec3 attenuation; vec3 emit = hrec.m()->emit(hrec) + vec3(0.1,0.1,0.1); // bloomy effect if (hrec.m()->scatter(curr_r, scattered, hrec, attenuation, rstate)) { curr_attenuation = emit + attenuation*curr_attenuation; curr_r = scattered; } else { return emit; } /*vec3 target = hrec.p() + hrec.n() + random_point_unit_sphere(rstate); curr_attenuation *= 0.5f; curr_r = ray(hrec.p(), target - hrec.p());*/ } else { /*vec3 unit_direction = vec3::normalize(curr_r.direction()); float t = 0.5f * (unit_direction.y() + 1.0f); vec3 v = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0); return curr_attenuation * v; */ // return world color return curr_attenuation; } } return vec3(); // exceeded recursion /*if ((col.r() < 0) || (col.g() < 0) || (col.b() < 0)) { printf("ERROR: COL=%f,%f,%f\n", col.r(), col.g(), col.b()); }*/ } __global__ void init_rand_state(curandState* randState, int width, int height) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; // if out of range if ((i >= width) || (j >= height)) { return; } int index = utils::XY(i, j); // same seed for every thread, very slow //curand_init(SEED, index, 0, &randState[index]); // different seed for each thread, fast curand_init(SEED + index, 0, 0, &randState[index]); // produces weird artifacts //curand_init(SEED, 0, 0, &randState[index]); } __global__ void render(vec3* frameBuffer, int width, int height, hitable_list** scene, camera** cam, curandState* randState) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; // if out of range if ((i >= width) || (j >= height)) { return; } int index = utils::XY(i, j); curandState rstate = randState[index]; vec3 col; for (uint16_t sample = 0; sample < SAMPLES_PER_PIXEL; ++sample) { // remember: random value is [0, 1[ float u = float(i + curand_uniform(&rstate)) / float(width); float v = float(j + curand_uniform(&rstate)) / float(height); ray r = (*cam)->get_ray(u, v, &rstate); col += color(r, scene, &rstate); } col /= float(SAMPLES_PER_PIXEL); //col.saturate(); // do gamma correction with gamma 2 => raise the color to the power of 1/2 (sqrt) frameBuffer[index] = col.saturate().gamma_correct(); // only for debug //frameBuffer[index] = 
col.gamma_correct(); } #ifdef SCENE_HDR constexpr char imagePath[] = "textures/hdr.jpg"; __global__ void populate_scene_hdr(hitable_object** objects, hitable_list** scene, camera** cam, curandState* state, float* textureBuffer) { if (threadIdx.x == 0 && blockIdx.x == 0) { objects[0] = new sphere( vec3(1., 0, -1), 1, //new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) new metal(vec3(0.8, 0.2, 0.5), 0.05) ); objects[0]->set_id(0); text* hdr_texture = new image_texture(textureBuffer, WIDTH*2, HEIGHT*2); //sphere 2 objects[1] = new sphere( vec3(0, 0, 0), 10, new emitter(hdr_texture) ); objects[1]->set_id(1); objects[2] = new sphere( vec3(-1., 0, -1), 1, new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) ); objects[2]->set_id(2); *scene = new hitable_list(objects, nullptr, 3); scene[0]->set_id(3); vec3 lookfrom = vec3(-1, 2, 9); vec3 lookat = vec3(0, 0, -1); float dist_to_focus = (lookfrom - lookat).length(); float aperture = .25f; *cam = new camera( lookfrom, // lookfrom lookat, // lookat vec3(0, 1, 0), // up 20.f, // fov float(WIDTH) / float(HEIGHT), aperture, dist_to_focus, 0, 0.2 ); } } #endif // TODO: check for array boundary #ifdef SCENE_BALLS constexpr char imagePath[] = "textures/earth.jpg"; __global__ void populate_scene_balls(hitable_object** objects, hitable_list** scene, camera** cam, curandState* state, float* textureBuffer) { if (threadIdx.x == 0 && blockIdx.x == 0) { // only call once // sphere 1 objects[0] = new sphere( vec3(0, 0, -1), 0.5, new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) //new dielectric(1.3, vec3(1, 1, 1)) //new dielectric(1.5, vec3(1,1,1)) ); objects[0]->set_id(0); // sphere 2 /*text* checker = new checker_texture( new constant_texture(vec3(0.1, 0.2, 0.5)), new constant_texture(vec3(0.5, 0.2, 0.1)));*/ //text* noise1 = new noise_texture(noise_type::TURBULANCE, .1f); text* noise1 = new noise_texture(noise_type::MARBLE, 1.f); /*text* noise = new wood_texture(vec3(0.792, 0.643, 0.447), //vec3(0.267, 0.188, 0.133), vec3(0.412, 0.349, 0.306), 10.f);*/ /*text* checker = new checker_texture( noise, noise);*/ //text* noise1 = new noise_texture(noise_type::MARBLE, .2f); objects[1] = new sphere( vec3(0, -1000.5, 1), 1000, //10, new lambertian(noise1) //new lambertian(new constant_texture(vec3(0.1, 0.2, 0.5))) ); objects[1]->set_id(1); /*objects[1] = new sphere( vec3(0, -20, 1), 10, new lambertian(vec3(0.1, 0.2, 0.5)) ); objects[1]->set_id(1);*/ text* im_text = new image_texture(textureBuffer, WIDTH, HEIGHT); //sphere 3 objects[2] = new sphere( vec3(1, 0, -1), 0.5, //new dielectric(1.5) //new lambertian(noise1) //new lambertian(new constant_texture(vec3(0.1, 0.2, 0.5))) new emitter(im_text,2), //new metal(vec3(1.f), 0.f) //new metal(vec3(1.f), 0.f) //new metal(vec3(0.075, 0.461, 0.559), 0.1f) true); objects[2]->set_id(2); //sphere 4 //perlin_noise::init(state); //perlin_noise noise; //text* per_text = new noise_texture(state); objects[3] = new sphere( vec3(-1, 0, -2), 0.5, //new lambertian(per_text) new metal(vec3(1.f), 0.f) // new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) //new dielectric(1.5, vec3(1, 1, 1)) //new metal(vec3(0.8, 0.8, 0.8), 0.5) ); objects[3]->set_id(3); //sphere 5 objects[4] = new sphere( vec3(0, 0, -2), 0.5, new metal(vec3(0.8, 0.8, 0.8), 0.5) //new metal(vec3(0.8, 0.8, 0.8), 0.5) ); objects[4]->set_id(4); objects[5] = new sphere( vec3(1, 0, -2), 0.5, //new emitter(vec3(1,0.5,0.5)) new dielectric(1.5, vec3(1, 1, 1)) //new lambertian(new constant_texture(vec3(0.1, 0.2, 0.5))) //new lambertian(vec3(0.2, 0.9, 
0.3)*0.6) ); objects[5]->set_id(5); objects[6]= new sphere( vec3(-1, 0, -1), 0.5, new emitter(new constant_texture(vec3(0.5,1,0.5))) //new dielectric(1.1, vec3(0.8,1.0,0.8)) ); objects[6]->set_id(6); objects[7] = new moving_sphere( vec3(-1, 1, -1), vec3(-2, 1, -1), 0.f, 1.f, 0.2, new lambertian(new constant_texture(vec3(0.6, 0.1, 0.1))) //new dielectric(1.5, vec3(1, 1, 1)) //new metal(vec3(0.8, 0.8, 0.8), 0.5) ); /*objects[7] = new sphere( vec3(-1, 1, -1), 0.5, new lambertian(vec3(0.6, 0.1, 0.1)));*/ objects[7]->set_id(7); objects[8] = new bvh_node(objects, 8, 0, 1, state, 0); objects[8]->set_id(8); // check bvh hierarchy //bvh_node::display_tree(static_cast<bvh_node*>(objects[8]), 2); *scene = new hitable_list(objects, static_cast<bvh_node*>(objects[8]), 8); scene[0]->set_id(9); //for (int i = 0; i < 9; ++i) { // printf("(%d) %s\n", objects[i]->get_id(), hitable_object::obj_type_str(objects[i]->get_object_type())); //} //vec3 lookfrom = vec3(-2, 1, 2) * 2; vec3 lookfrom = vec3(-1, 1,5); // revert to 2 //THISvec3 lookfrom = vec3(5, 2, 3); //vec3 lookat = vec3(0, 0, -1); //vec3 lookat = vec3(-1, 0, -1); // redball //vec3 lookat = vec3(1, 0, -1); // marble ball vec3 lookat = vec3(0, 0, -1); float dist_to_focus = (lookfrom - lookat).length(); float aperture = .25f; *cam = new camera( lookfrom, // lookfrom lookat, // lookat vec3(0,1,0), // up 20.f, // fov float(WIDTH) / float(HEIGHT), aperture, dist_to_focus, 0, 0.2 ); //hit_record hrec; //ray r = (*cam)->get_ray(0.54, 0.5, state); //static_cast<bvh_node*>(objects[8])->dfs(r, 0.001f, FLT_MAX, hrec); //assert(0); } } #endif __global__ void free_scene(hitable_object** objects, hitable_list** scene, camera** cam) { // Objects already destoryed inside scene //delete* (objects); //delete* (objects + 1); delete* scene; delete* cam; } int main(int argc, char** argv) { // loading image to host // load image as uint8_t //uint8_t* imgData = stbi_load(imagePath, &w, &h, &ch, 0); //stbi_write_png("export.png", w, h, ch, imgData, w * ch); // load image as float int w, h, ch; stbi_ldr_to_hdr_scale(1.0f); stbi_ldr_to_hdr_gamma(1.0f); float* imgData_h = stbi_loadf(imagePath, &w, &h, &ch, 0); std::cout << "Loaded image with " << w << "x" << h << " and " << ch << " channels\n"; float* imgData_d; size_t imgSize = w * h * ch * sizeof(float); // TODO: for now, store texture in global memory. 
In the future, use texture checkCudaErrors(cudaMalloc((float**)&imgData_d, imgSize)); checkCudaErrors(cudaMemcpy(imgData_d, imgData_h, imgSize, cudaMemcpyHostToDevice)); stbi_image_free(imgData_h); //stbi_write_png("export2.png", w, h, ch, imgData, w * ch); //stbi_image_free(imgData); std::cout << "Rendering a " << WIDTH << "x" << HEIGHT << " image "; std::cout << "(" << SAMPLES_PER_PIXEL << " samples per pixel) "; std::cout << "in " << THREAD_SIZE_X << "x" << THREAD_SIZE_Y << " blocks.\n"; // _d stands for device hitable_object** hitableObjects_d; hitable_list** scene_d; camera** camera_d; // random state curandState* rand_state_d; checkCudaErrors(cudaMalloc((void**)&rand_state_d, WIDTH * HEIGHT * sizeof(curandState))); // allocate unified memory that holds the size of our image vec3* frameBuffer_u; // u stands for unified size_t frameBufferSize = WIDTH * HEIGHT * sizeof(vec3); // RGB values for each pixel checkCudaErrors(cudaMallocManaged((void**)&frameBuffer_u, frameBufferSize)); // allocate device memory #ifdef SCENE_BALLS checkCudaErrors(cudaMalloc((void**)&hitableObjects_d, 9 * sizeof(hitable_object*))); #endif #ifdef SCENE_HDR checkCudaErrors(cudaMalloc((void**)&hitableObjects_d, 3 * sizeof(hitable_object*))); #endif checkCudaErrors(cudaMalloc((void**)&scene_d, sizeof(hitable_list*))); checkCudaErrors(cudaMalloc((void**)&camera_d, sizeof(camera*))); // remember, construction is done in 1 block, 1 thread #ifdef SCENE_BALLS populate_scene_balls<<<1, 1>>> (hitableObjects_d, scene_d, camera_d, rand_state_d, imgData_d); #endif #ifdef SCENE_HDR populate_scene_hdr <<<1, 1 >>> (hitableObjects_d, scene_d, camera_d, rand_state_d, imgData_d); #endif checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); auto start = std::chrono::system_clock::now(); // remember: always round with + 1 dim3 blocks(WIDTH / THREAD_SIZE_X + 1, HEIGHT / THREAD_SIZE_Y + 1); dim3 threads(THREAD_SIZE_X, THREAD_SIZE_Y); // init rand state for each pixel init_rand_state<<<blocks,threads>>>(rand_state_d, WIDTH, HEIGHT); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); render<<<blocks, threads>>>(frameBuffer_u, WIDTH, HEIGHT, scene_d, camera_d, rand_state_d); checkCudaErrors(cudaGetLastError()); // block host until all device threads finish checkCudaErrors(cudaDeviceSynchronize()); auto end = std::chrono::system_clock::now(); auto timer_seconds = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count(); std::cout << "took " << timer_seconds << "us.\n"; // Output frame buffer as a ppm image #if 0 std::ofstream ppm_image("render.ppm"); ppm_image << "P3\n" << WIDTH << " " << HEIGHT << "\n255\n"; for (int j = HEIGHT - 1; j >= 0; j--) { for (int i = 0; i < WIDTH; i++) { size_t index = utils::XY(i, j); float r = frameBuffer_u[index].r(); float g = frameBuffer_u[index].g(); float b = frameBuffer_u[index].b(); int ir = int(255.99 * r); int ig = int(255.99 * g); int ib = int(255.99 * b); ppm_image << ir << " " << ig << " " << ib << "\n"; } } ppm_image.close(); #endif uint8_t* imgBuff = (uint8_t*)std::malloc(WIDTH * HEIGHT * 3 * sizeof(uint8_t)); for (int j = HEIGHT - 1; j >= 0; --j) { for (int i = 0; i < WIDTH; ++i) { size_t index = utils::XY(i, j); // stbi generates a Y flipped image size_t rev_index = utils::XY(i, HEIGHT - j - 1); float r = frameBuffer_u[index].r(); float g = frameBuffer_u[index].g(); float b = frameBuffer_u[index].b(); imgBuff[rev_index * 3 + 0] = int(255.999f * r) & 255; imgBuff[rev_index * 3 + 1] = int(255.999f * g) & 255; 
imgBuff[rev_index * 3 + 2] = int(255.999f * b) & 255; } } //stbi_write_png("render.png", WIDTH, HEIGHT, 3, imgBuff, WIDTH * 3); stbi_write_jpg("render.jpg", WIDTH, HEIGHT, 3, imgBuff, 100); std::free(imgBuff); // clean everything checkCudaErrors(cudaDeviceSynchronize()); free_scene<<<1, 1>>>(hitableObjects_d, scene_d, camera_d); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(hitableObjects_d)); checkCudaErrors(cudaFree(scene_d)); checkCudaErrors(cudaFree(camera_d)); checkCudaErrors(cudaFree(rand_state_d)); checkCudaErrors(cudaFree(frameBuffer_u)); checkCudaErrors(cudaFree(imgData_d)); // Documentation: Destroy all allocations and reset all state on the // current device in the current process checkCudaErrors(cudaDeviceReset()); return 0; }
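The comments in init_rand_state above weigh three seeding strategies: one seed with a per-thread sequence id (slow to initialise), seed + index per thread (fast, the renderer's choice), and one seed with sequence 0 everywhere (artifacts). For reference, a minimal sketch of the per-pixel cuRAND pattern for the first option; the seed constant and kernel names are illustrative:

#include <curand_kernel.h>

#define DEMO_SEED 1984ULL   // illustrative; the renderer uses its own SEED macro

// One curandState per pixel.  One seed with a distinct sequence id per thread
// gives independent, well-distributed streams but is slow to initialise;
// seeding with SEED + index (the renderer's choice) sets up much faster.
__global__ void setup_states(curandState* states, int width, int height) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= width || y >= height) return;
    int idx = y * width + x;
    curand_init(DEMO_SEED, idx, 0, &states[idx]);
}

// Draw a jittered sample coordinate per pixel, advancing the stored state.
__global__ void jitter_demo(curandState* states, float* u, int width, int height) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= width || y >= height) return;
    int idx = y * width + x;
    curandState local = states[idx];                // work on a register copy
    u[idx] = (x + curand_uniform(&local)) / float(width);
    states[idx] = local;                            // store the advanced state
}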
e37cd1be81208a7221942f83824ddc58f75fa853.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <set> #include <map> #include <algorithm> #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> using namespace std; #define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__)) #define MAX_THREADS_PER_BLOCK 1024 #define MAX_EDGES_PER_SHARD 33554432 void safe_call(hipError_t ret, int line) { if(ret!=hipSuccess) { printf("Error at line %d : %s\n",line,hipGetErrorString(ret)); exit(-1); } } typedef struct __interval { int start; int end; } interval_t; typedef struct __edge { int src; int dest; int val; } edge_t; typedef struct __vertex { int val; } vertex_t; typedef struct __shard { int E; int Vstart; int Vend; vertex_t * from; vertex_t * to; } shard_t; /* typedef struct __graph { vertex_t * vertices; } graph_t; graph_t * load_subgraph(interval_t, vector<edge_t>); */ __device__ bool d_over; __global__ void reset() { d_over = false; } __global__ void init(vertex_t * vertices, int starting_vertex, int num_vertices) { int v = blockDim.x*blockIdx.x + threadIdx.x; if (v==starting_vertex) vertices[v].val = 0; else if(v < num_vertices) vertices[v].val = -1; } /*__global__ void gather_bfs(shard_t * shard, vertex_t * vertices, int current_depth) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < shard->E) { if(shard->edges[id].val == (current_depth+1)) { int t=shard->edges[id].dest; if(vertices[t].val == -1) { vertices[t].val = current_depth+1; d_over = true; } } } }*/ __global__ void scatter_bfs(const shard_t * shard, vertex_t * vertices, int current_depth, int V) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < shard->E) { int s=shard->from[id].val; if(s < V) { int t=vertices[s].val; if(t==current_depth) { //shard->edges[id].val = t+1; int u=shard->to[id].val; if(u < V) { if(vertices[u].val == -1) { vertices[u].val = t+1; d_over = true; } } else printf("Illegal vertex dest: %d\n",u); } } else printf("Illegal vertex src: %d\n",s); } } bool cost(const edge_t &a, const edge_t &b) { return (a.src < b.src); } int main(int argc, char * argv[]) { struct timeval t1,t2; static char * filename; if(argc!=2) { printf("./a.out <filename>\n"); exit(-1); } else { filename = argv[1]; } FILE * fp = fopen(filename,"r"); if(!fp) { printf("Error reading file.\n"); exit(-1); } /* Set cuda device to K40 */ CUDA_SAFE_CALL(hipSetDevice(0)); printf("Begin file reading...\n"); /* Get graph from file into CPU memory */ int num_vertices, num_edges, i, j, k; fscanf(fp,"%d %d",&num_vertices,&num_edges); //Array of vectors. 
vector i contains the in edges of vertex i vector< vector<edge_t> > outEdges(num_vertices); int * prefixV = (int *) calloc(num_vertices,sizeof(int)); int s,d,v; // In Graphchi case, I am storing the source depth in each edge // In X-stream case, I am storing the destination depth in each edge for(i=0; i<num_edges; i++) { fscanf(fp,"%d",&s); fscanf(fp,"%d",&d); edge_t e; e.src=s; e.dest=d; outEdges[s].push_back(e); } printf("Finished file reading.\n"); printf("\nBegin interval construction...\n"); // Construction of intervals gettimeofday(&t1,NULL); int num_intervals = 0, add = 1; vector<int> startInter; prefixV[0] = outEdges[0].size(); if(prefixV[0] > MAX_EDGES_PER_SHARD) { startInter.push_back(0); num_intervals++; add = 0; } for(i=1; i<num_vertices; i++) { prefixV[i] = outEdges[i].size(); if(add==1) prefixV[i] += prefixV[i-1]; if(prefixV[i] > MAX_EDGES_PER_SHARD) { startInter.push_back(i); num_intervals++; add = 0; } else add = 1; } if(add==1) { startInter.push_back(i-1); num_intervals++; } interval_t * interval = (interval_t *) malloc(num_intervals*sizeof(interval_t)); for(i=0; i<num_intervals; i++) { interval[i].start = (i == 0) ? 0 : (startInter[i-1]+1); interval[i].end = startInter[i]; } gettimeofday(&t2,NULL); printf("Time to construct intervals : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); printf("\nBegin shard construction...\n"); //Construction of shards gettimeofday(&t1,NULL); shard_t * shard_host = (shard_t *) malloc(num_intervals*sizeof(shard_t)); //Finding the max number of edges in a shard // We will allocate space for that many edges to each shard to maintain consistency int MAX_NUM_EDGES_SHARD = INT_MIN; for(i=0; i<num_intervals; i++) { int t = prefixV[interval[i].end]; if(t > MAX_NUM_EDGES_SHARD) MAX_NUM_EDGES_SHARD = t; } for(i=0; i<num_intervals; i++) { // first and last vertices in shard shard_host[i].Vstart = interval[i].start; shard_host[i].Vend = interval[i].end; // number of edges in shard shard_host[i].E = prefixV[interval[i].end]; shard_host[i].from = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(edge_t)); shard_host[i].to = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(edge_t)); } for(i=0; i<num_intervals; i++) { vector<edge_t> tempEdges; for(j=interval[i].start; j<=interval[i].end; j++) { for(vector<edge_t>::iterator it=outEdges[j].begin(); it!=outEdges[j].end(); ++it) tempEdges.push_back(*it); } //Sorting based on src vertex to align the edges such that the access of vertices[src] is sequential sort(tempEdges.begin(),tempEdges.end(),cost); j=0; for (vector<edge_t>::iterator it = tempEdges.begin() ; it != tempEdges.end(); ++it) { shard_host[i].from[j].val = (*it).src; shard_host[i].to[j].val = (*it).dest; j++; } } gettimeofday(&t2,NULL); printf("Time to construct shards : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); int num_of_blocks = 1; int num_of_threads_per_block = MAX_NUM_EDGES_SHARD; if(MAX_NUM_EDGES_SHARD>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(MAX_NUM_EDGES_SHARD/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); shard_t *shard; //CUDA_SAFE_CALL(hipHostMalloc((void **)&shard, sizeof(shard_t))); //CUDA_SAFE_CALL(hipHostMalloc((void **)&shard->edges, MAX_NUM_EDGES_SHARD*sizeof(edge_t))); //CUDA_SAFE_CALL(hipMallocManaged((void **)&shard, sizeof(shard_t))); //CUDA_SAFE_CALL(hipMallocManaged((void **)&shard->edges, MAX_NUM_EDGES_SHARD*sizeof(edge_t))); vertex_t * from_dev; 
vertex_t * to_dev; CUDA_SAFE_CALL(hipMalloc((void **)&shard, sizeof(shard_t))); CUDA_SAFE_CALL(hipMalloc((void **)&from_dev, MAX_NUM_EDGES_SHARD*sizeof(edge_t))); CUDA_SAFE_CALL(hipMalloc((void **)&to_dev, MAX_NUM_EDGES_SHARD*sizeof(edge_t))); // It will contain the visited status of each vertex vertex_t *vertices; //CUDA_SAFE_CALL(hipHostMalloc((void **)&vertices, num_vertices*sizeof(vertex_t))); vertex_t *vertices_host = (vertex_t *) malloc(num_vertices*sizeof(vertex_t)); CUDA_SAFE_CALL(hipMalloc((void **)&vertices, num_vertices*sizeof(vertex_t))); hipLaunchKernelGGL(( init), dim3(((num_vertices+MAX_THREADS_PER_BLOCK-1)/MAX_THREADS_PER_BLOCK)),dim3(MAX_THREADS_PER_BLOCK), 0, 0, vertices, 0, num_vertices); hipEvent_t start,end; float diff; double time = 0; CUDA_SAFE_CALL(hipEventCreate(&start)); CUDA_SAFE_CALL(hipEventCreate(&end)); printf("Begin kernel\n"); bool stop; k=0; do { stop = false; CUDA_SAFE_CALL(hipMemcpyToSymbol(d_over, &stop, sizeof(bool),0, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipDeviceSynchronize()); for(i=0; i<num_intervals; i++) { //Load the data of shard_host[i] into shard (pinned memory) /*shard->E = shard_host[i].E; shard->Vstart = shard_host[i].Vstart; shard->Vend = shard_host[i].Vend; for (j=0; j<shard_host[i].E; j++) { shard->edges[j] = shard_host[i].edges[j]; j++; }*/ CUDA_SAFE_CALL(hipMemcpy(shard, &shard_host[i], sizeof(shard_t),hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(from_dev, shard_host[i].from, shard_host[i].E*sizeof(vertex_t),hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(to_dev, shard_host[i].to, shard_host[i].E*sizeof(vertex_t),hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(&(shard->from), &from_dev, sizeof(vertex_t *),hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(&(shard->to), &to_dev, sizeof(vertex_t *),hipMemcpyHostToDevice)); gettimeofday(&t1,NULL); hipLaunchKernelGGL(( scatter_bfs), dim3(grid), dim3(threads), 0, 0, shard, vertices, k, num_vertices); CUDA_SAFE_CALL(hipDeviceSynchronize()); gettimeofday(&t2,NULL); time += ((t2.tv_sec*1.0e3+t2.tv_usec*1.0e-3)-(t1.tv_sec*1.0e3+t1.tv_usec*1.0e-3)); } /*for(i=0; i<num_intervals; i++) { //Load the data of shard_host[i] into shard (pinned memory) shard.E = shard_host[i].E; shard.Vstart = shard_host[i].Vstart; shard.Vend = shard_host[i].Vend; for (j=0; j<shard_host[i].E; j++) { shard.edges[j] = shard_host[i].edges[j]; j++; } gettimeofday(&t1,NULL); gather_bfs<<<grid, threads>>> (shard, vertices, k, num_vertices); CUDA_SAFE_CALL(hipDeviceSynchronize()); gettimeofday(&t2,NULL); time += ((t2.tv_sec*1.0e3+t2.tv_usec*1.0e-3)-(t1.tv_sec*1.0e3+t1.tv_usec*1.0e-3)) }*/ CUDA_SAFE_CALL(hipMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, hipMemcpyDeviceToHost)); k++; }while(stop); printf("Number of iterations : %d\n",k); CUDA_SAFE_CALL(hipMemcpy(vertices_host, vertices, num_vertices*sizeof(vertex_t), hipMemcpyDeviceToHost)); /*for(int i = 0; i < num_vertices; i++) { printf("Vertex %d Distance %d\n",i,vertices_host[i].val); }*/ printf("Time: %f ms\n",time); free(interval); for(i=0; i<num_intervals; i++) { free(shard_host[i].from); free(shard_host[i].to); } free(shard_host); free(vertices_host); //CUDA_SAFE_CALL(hipHostFree(vertices)); //CUDA_SAFE_CALL(hipHostFree(shard->edges)); //CUDA_SAFE_CALL(hipHostFree(shard)); CUDA_SAFE_CALL(hipFree(vertices)); CUDA_SAFE_CALL(hipFree(from_dev)); CUDA_SAFE_CALL(hipFree(to_dev)); CUDA_SAFE_CALL(hipFree(shard)); CUDA_SAFE_CALL(hipEventDestroy(start)); CUDA_SAFE_CALL(hipEventDestroy(end)); return 0; }
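Both BFS versions drive the outer loop with a __device__ bool flag (d_over) that the scatter kernel sets and the host reads back through MemcpyToSymbol / MemcpyFromSymbol. A minimal sketch of that convergence-flag pattern, written in CUDA spelling to match the entry that follows; the kernel and its work condition are illustrative:

#include <cuda_runtime.h>

__device__ bool d_changed;                 // set by the kernel when it did work

// Illustrative relaxation step: decrement positive entries and raise the flag.
__global__ void relax_step(int* values, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= n) return;
    if (values[i] > 0) {
        values[i] -= 1;
        d_changed = true;                  // every writer stores the same value
    }
}

// Host-side convergence loop: reset the flag, run one step, read the flag back.
void run_until_converged(int* d_values, int n) {
    bool changed;
    do {
        changed = false;
        cudaMemcpyToSymbol(d_changed, &changed, sizeof(bool));
        relax_step<<<(n + 255) / 256, 256>>>(d_values, n);
        cudaMemcpyFromSymbol(&changed, d_changed, sizeof(bool));
    } while (changed);
}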
e37cd1be81208a7221942f83824ddc58f75fa853.cu
#include <iostream> #include <vector> #include <set> #include <map> #include <algorithm> #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> using namespace std; #define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__)) #define MAX_THREADS_PER_BLOCK 1024 #define MAX_EDGES_PER_SHARD 33554432 void safe_call(cudaError_t ret, int line) { if(ret!=cudaSuccess) { printf("Error at line %d : %s\n",line,cudaGetErrorString(ret)); exit(-1); } } typedef struct __interval { int start; int end; } interval_t; typedef struct __edge { int src; int dest; int val; } edge_t; typedef struct __vertex { int val; } vertex_t; typedef struct __shard { int E; int Vstart; int Vend; vertex_t * from; vertex_t * to; } shard_t; /* typedef struct __graph { vertex_t * vertices; } graph_t; graph_t * load_subgraph(interval_t, vector<edge_t>); */ __device__ bool d_over; __global__ void reset() { d_over = false; } __global__ void init(vertex_t * vertices, int starting_vertex, int num_vertices) { int v = blockDim.x*blockIdx.x + threadIdx.x; if (v==starting_vertex) vertices[v].val = 0; else if(v < num_vertices) vertices[v].val = -1; } /*__global__ void gather_bfs(shard_t * shard, vertex_t * vertices, int current_depth) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < shard->E) { if(shard->edges[id].val == (current_depth+1)) { int t=shard->edges[id].dest; if(vertices[t].val == -1) { vertices[t].val = current_depth+1; d_over = true; } } } }*/ __global__ void scatter_bfs(const shard_t * shard, vertex_t * vertices, int current_depth, int V) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < shard->E) { int s=shard->from[id].val; if(s < V) { int t=vertices[s].val; if(t==current_depth) { //shard->edges[id].val = t+1; int u=shard->to[id].val; if(u < V) { if(vertices[u].val == -1) { vertices[u].val = t+1; d_over = true; } } else printf("Illegal vertex dest: %d\n",u); } } else printf("Illegal vertex src: %d\n",s); } } bool cost(const edge_t &a, const edge_t &b) { return (a.src < b.src); } int main(int argc, char * argv[]) { struct timeval t1,t2; static char * filename; if(argc!=2) { printf("./a.out <filename>\n"); exit(-1); } else { filename = argv[1]; } FILE * fp = fopen(filename,"r"); if(!fp) { printf("Error reading file.\n"); exit(-1); } /* Set cuda device to K40 */ CUDA_SAFE_CALL(cudaSetDevice(0)); printf("Begin file reading...\n"); /* Get graph from file into CPU memory */ int num_vertices, num_edges, i, j, k; fscanf(fp,"%d %d",&num_vertices,&num_edges); //Array of vectors. 
vector i contains the in edges of vertex i vector< vector<edge_t> > outEdges(num_vertices); int * prefixV = (int *) calloc(num_vertices,sizeof(int)); int s,d,v; // In Graphchi case, I am storing the source depth in each edge // In X-stream case, I am storing the destination depth in each edge for(i=0; i<num_edges; i++) { fscanf(fp,"%d",&s); fscanf(fp,"%d",&d); edge_t e; e.src=s; e.dest=d; outEdges[s].push_back(e); } printf("Finished file reading.\n"); printf("\nBegin interval construction...\n"); // Construction of intervals gettimeofday(&t1,NULL); int num_intervals = 0, add = 1; vector<int> startInter; prefixV[0] = outEdges[0].size(); if(prefixV[0] > MAX_EDGES_PER_SHARD) { startInter.push_back(0); num_intervals++; add = 0; } for(i=1; i<num_vertices; i++) { prefixV[i] = outEdges[i].size(); if(add==1) prefixV[i] += prefixV[i-1]; if(prefixV[i] > MAX_EDGES_PER_SHARD) { startInter.push_back(i); num_intervals++; add = 0; } else add = 1; } if(add==1) { startInter.push_back(i-1); num_intervals++; } interval_t * interval = (interval_t *) malloc(num_intervals*sizeof(interval_t)); for(i=0; i<num_intervals; i++) { interval[i].start = (i == 0) ? 0 : (startInter[i-1]+1); interval[i].end = startInter[i]; } gettimeofday(&t2,NULL); printf("Time to construct intervals : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); printf("\nBegin shard construction...\n"); //Construction of shards gettimeofday(&t1,NULL); shard_t * shard_host = (shard_t *) malloc(num_intervals*sizeof(shard_t)); //Finding the max number of edges in a shard // We will allocate space for that many edges to each shard to maintain consistency int MAX_NUM_EDGES_SHARD = INT_MIN; for(i=0; i<num_intervals; i++) { int t = prefixV[interval[i].end]; if(t > MAX_NUM_EDGES_SHARD) MAX_NUM_EDGES_SHARD = t; } for(i=0; i<num_intervals; i++) { // first and last vertices in shard shard_host[i].Vstart = interval[i].start; shard_host[i].Vend = interval[i].end; // number of edges in shard shard_host[i].E = prefixV[interval[i].end]; shard_host[i].from = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(edge_t)); shard_host[i].to = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(edge_t)); } for(i=0; i<num_intervals; i++) { vector<edge_t> tempEdges; for(j=interval[i].start; j<=interval[i].end; j++) { for(vector<edge_t>::iterator it=outEdges[j].begin(); it!=outEdges[j].end(); ++it) tempEdges.push_back(*it); } //Sorting based on src vertex to align the edges such that the access of vertices[src] is sequential sort(tempEdges.begin(),tempEdges.end(),cost); j=0; for (vector<edge_t>::iterator it = tempEdges.begin() ; it != tempEdges.end(); ++it) { shard_host[i].from[j].val = (*it).src; shard_host[i].to[j].val = (*it).dest; j++; } } gettimeofday(&t2,NULL); printf("Time to construct shards : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6))); int num_of_blocks = 1; int num_of_threads_per_block = MAX_NUM_EDGES_SHARD; if(MAX_NUM_EDGES_SHARD>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(MAX_NUM_EDGES_SHARD/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); shard_t *shard; //CUDA_SAFE_CALL(cudaMallocHost((void **)&shard, sizeof(shard_t))); //CUDA_SAFE_CALL(cudaMallocHost((void **)&shard->edges, MAX_NUM_EDGES_SHARD*sizeof(edge_t))); //CUDA_SAFE_CALL(cudaMallocManaged((void **)&shard, sizeof(shard_t))); //CUDA_SAFE_CALL(cudaMallocManaged((void **)&shard->edges, MAX_NUM_EDGES_SHARD*sizeof(edge_t))); vertex_t * from_dev; 
vertex_t * to_dev; CUDA_SAFE_CALL(cudaMalloc((void **)&shard, sizeof(shard_t))); CUDA_SAFE_CALL(cudaMalloc((void **)&from_dev, MAX_NUM_EDGES_SHARD*sizeof(edge_t))); CUDA_SAFE_CALL(cudaMalloc((void **)&to_dev, MAX_NUM_EDGES_SHARD*sizeof(edge_t))); // It will contain the visited status of each vertex vertex_t *vertices; //CUDA_SAFE_CALL(cudaMallocHost((void **)&vertices, num_vertices*sizeof(vertex_t))); vertex_t *vertices_host = (vertex_t *) malloc(num_vertices*sizeof(vertex_t)); CUDA_SAFE_CALL(cudaMalloc((void **)&vertices, num_vertices*sizeof(vertex_t))); init<<<((num_vertices+MAX_THREADS_PER_BLOCK-1)/MAX_THREADS_PER_BLOCK),MAX_THREADS_PER_BLOCK>>> (vertices, 0, num_vertices); cudaEvent_t start,end; float diff; double time = 0; CUDA_SAFE_CALL(cudaEventCreate(&start)); CUDA_SAFE_CALL(cudaEventCreate(&end)); printf("Begin kernel\n"); bool stop; k=0; do { stop = false; CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_over, &stop, sizeof(bool),0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); for(i=0; i<num_intervals; i++) { //Load the data of shard_host[i] into shard (pinned memory) /*shard->E = shard_host[i].E; shard->Vstart = shard_host[i].Vstart; shard->Vend = shard_host[i].Vend; for (j=0; j<shard_host[i].E; j++) { shard->edges[j] = shard_host[i].edges[j]; j++; }*/ CUDA_SAFE_CALL(cudaMemcpy(shard, &shard_host[i], sizeof(shard_t),cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(from_dev, shard_host[i].from, shard_host[i].E*sizeof(vertex_t),cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(to_dev, shard_host[i].to, shard_host[i].E*sizeof(vertex_t),cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(&(shard->from), &from_dev, sizeof(vertex_t *),cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(&(shard->to), &to_dev, sizeof(vertex_t *),cudaMemcpyHostToDevice)); gettimeofday(&t1,NULL); scatter_bfs<<<grid, threads>>> (shard, vertices, k, num_vertices); CUDA_SAFE_CALL(cudaDeviceSynchronize()); gettimeofday(&t2,NULL); time += ((t2.tv_sec*1.0e3+t2.tv_usec*1.0e-3)-(t1.tv_sec*1.0e3+t1.tv_usec*1.0e-3)); } /*for(i=0; i<num_intervals; i++) { //Load the data of shard_host[i] into shard (pinned memory) shard.E = shard_host[i].E; shard.Vstart = shard_host[i].Vstart; shard.Vend = shard_host[i].Vend; for (j=0; j<shard_host[i].E; j++) { shard.edges[j] = shard_host[i].edges[j]; j++; } gettimeofday(&t1,NULL); gather_bfs<<<grid, threads>>> (shard, vertices, k, num_vertices); CUDA_SAFE_CALL(cudaDeviceSynchronize()); gettimeofday(&t2,NULL); time += ((t2.tv_sec*1.0e3+t2.tv_usec*1.0e-3)-(t1.tv_sec*1.0e3+t1.tv_usec*1.0e-3)) }*/ CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, cudaMemcpyDeviceToHost)); k++; }while(stop); printf("Number of iterations : %d\n",k); CUDA_SAFE_CALL(cudaMemcpy(vertices_host, vertices, num_vertices*sizeof(vertex_t), cudaMemcpyDeviceToHost)); /*for(int i = 0; i < num_vertices; i++) { printf("Vertex %d Distance %d\n",i,vertices_host[i].val); }*/ printf("Time: %f ms\n",time); free(interval); for(i=0; i<num_intervals; i++) { free(shard_host[i].from); free(shard_host[i].to); } free(shard_host); free(vertices_host); //CUDA_SAFE_CALL(cudaFreeHost(vertices)); //CUDA_SAFE_CALL(cudaFreeHost(shard->edges)); //CUDA_SAFE_CALL(cudaFreeHost(shard)); CUDA_SAFE_CALL(cudaFree(vertices)); CUDA_SAFE_CALL(cudaFree(from_dev)); CUDA_SAFE_CALL(cudaFree(to_dev)); CUDA_SAFE_CALL(cudaFree(shard)); CUDA_SAFE_CALL(cudaEventDestroy(start)); CUDA_SAFE_CALL(cudaEventDestroy(end)); return 0; }
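The copy loop above uploads shard_host[i] to the device and then patches the struct's from/to members with two extra cudaMemcpy calls of pointer values. An equivalent and slightly simpler pattern builds a host-side staging copy whose pointer members already hold device addresses and copies the struct once; a sketch that reuses the shard_t and vertex_t definitions from the program above (the function name is illustrative):

#include <cuda_runtime.h>

// Sketch: upload one shard whose 'from'/'to' members must end up pointing at
// device memory.  shard_t and vertex_t are the structs defined in the program
// above; shard_dev, from_dev and to_dev are the pre-allocated device buffers.
void upload_shard(shard_t* shard_dev, vertex_t* from_dev, vertex_t* to_dev,
                  const shard_t* h_shard) {
    // copy the edge endpoint arrays into the device buffers
    cudaMemcpy(from_dev, h_shard->from, h_shard->E * sizeof(vertex_t),
               cudaMemcpyHostToDevice);
    cudaMemcpy(to_dev, h_shard->to, h_shard->E * sizeof(vertex_t),
               cudaMemcpyHostToDevice);

    // build a host-side staging copy whose pointers are device addresses,
    // then move the scalar fields and the patched pointers in a single memcpy
    shard_t staged = *h_shard;
    staged.from = from_dev;
    staged.to   = to_dev;
    cudaMemcpy(shard_dev, &staged, sizeof(shard_t), cudaMemcpyHostToDevice);
}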
8e3f2db51a19b0acc5e8cc3c75f2113b00ee0ebd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>

#define N 512
#define THREADS_PER_BLOCK 216

// define kernel to perform parallel addition on the GPU using CUDA blocks
__global__ void parallel_add_with_blocks_kernel(int *dev_c, int *dev_a, int *dev_b) {
    // use cuda block identifiers to compute the vector addition
    dev_c[blockIdx.x] = dev_a[blockIdx.x] + dev_b[blockIdx.x];
}

// define kernel to perform parallel addition on the GPU using CUDA threads
__global__ void parallel_add_with_threads_kernel(int *dev_c, int *dev_a, int *dev_b) {
    // use cuda thread identifiers to compute the vector addition
    dev_c[threadIdx.x] = dev_a[threadIdx.x] + dev_b[threadIdx.x];
}

// define kernel to perform parallel addition on the GPU using CUDA threads and blocks simultaneously
__global__ void parallel_add_threads_blocks_kernel(int *dev_c, int *dev_a, int *dev_b) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    dev_c[index] = dev_a[index] + dev_b[index];
}

// declare helper function that assigns ints to an array of ints
void assign_ints(int*, unsigned int);

int main() {
    int *a, *b, *c;             // declare host memory for arrays a, b, c
    int *dev_a, *dev_b, *dev_c; // declare device copies of a, b, c
    int size = N * sizeof(int); // calculate memory size needed

    // allocate device memory for a, b, c
    hipMalloc((void**)&dev_a, size);
    hipMalloc((void**)&dev_b, size);
    hipMalloc((void**)&dev_c, size);

    // allocate host memory for a, b, c
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);

    // assign values to host a, b
    assign_ints(a, N);
    assign_ints(b, N);

    // assign values to device a, b
    hipMemcpy(dev_a, a, size, hipMemcpyKind::hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, size, hipMemcpyKind::hipMemcpyHostToDevice);

    // launch kernel for parallel addition using "N blocks" and "one thread per block"
    hipLaunchKernelGGL(( parallel_add_with_blocks_kernel), dim3(N), dim3(1), 0, 0, dev_c, dev_a, dev_b);

    // launch kernel for parallel addition using "one block" and "N threads per block"
    hipLaunchKernelGGL(( parallel_add_with_threads_kernel), dim3(1), dim3(N), 0, 0, dev_c, dev_a, dev_b);

    // launch N parallel kernels to compute the vector addition
    // using "N/THREADS_PER_BLOCK" blocks and "THREADS_PER_BLOCK" threads
    hipLaunchKernelGGL(( parallel_add_threads_blocks_kernel), dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, dev_c, dev_a, dev_b);

    // copy result from device to host memory
    hipMemcpy(c, dev_c, size, hipMemcpyKind::hipMemcpyDeviceToHost);

    // de-allocate host and device memory
    free(a);
    free(b);
    free(c);
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);

    return 0;
}

// assign sequential ints to an array arr of the given size
void assign_ints(int *arr, unsigned int size) {
    for (int i = 0; i < size; i++) {
        arr[i] = i;
    }
}
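The hipified launches above illustrate the general rewrite hipify applies: a CUDA triple-chevron launch becomes hipLaunchKernelGGL with explicit grid, block, shared-memory and stream arguments, and runtime calls are renamed one-for-one. A small self-contained reminder, written as CUDA with the generated HIP form shown in comments (the kernel here is illustrative, not part of the dataset entry):

#include <cuda_runtime.h>

// Illustrative kernel, not part of the dataset entry.
__global__ void copy_kernel(int* dst, const int* src, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) dst[i] = src[i];
}

void launch_copy(int* d_dst, const int* d_src, int n) {
    // CUDA launch syntax:
    copy_kernel<<<(n + 255) / 256, 256>>>(d_dst, d_src, n);

    // hipify rewrites the same launch to the macro form seen in the entry above:
    //   hipLaunchKernelGGL((copy_kernel), dim3((n + 255) / 256), dim3(256),
    //                      0 /* dynamic shared memory bytes */, 0 /* stream */,
    //                      d_dst, d_src, n);
    // Runtime calls are renamed one-for-one in the same pass:
    //   cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy, cudaFree -> hipFree.
}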
8e3f2db51a19b0acc5e8cc3c75f2113b00ee0ebd.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>

#define N 512
#define THREADS_PER_BLOCK 216

// define kernel to perform parallel addition on the GPU using CUDA blocks
__global__ void parallel_add_with_blocks_kernel(int *dev_c, int *dev_a, int *dev_b) {
    // use cuda block identifiers to compute the vector addition
    dev_c[blockIdx.x] = dev_a[blockIdx.x] + dev_b[blockIdx.x];
}

// define kernel to perform parallel addition on the GPU using CUDA threads
__global__ void parallel_add_with_threads_kernel(int *dev_c, int *dev_a, int *dev_b) {
    // use cuda thread identifiers to compute the vector addition
    dev_c[threadIdx.x] = dev_a[threadIdx.x] + dev_b[threadIdx.x];
}

// define kernel to perform parallel addition on the GPU using CUDA threads and blocks simultaneously
__global__ void parallel_add_threads_blocks_kernel(int *dev_c, int *dev_a, int *dev_b) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    dev_c[index] = dev_a[index] + dev_b[index];
}

// declare helper function that assigns ints to an array of ints
void assign_ints(int*, unsigned int);

int main() {
    int *a, *b, *c;             // declare host memory for arrays a, b, c
    int *dev_a, *dev_b, *dev_c; // declare device copies of a, b, c
    int size = N * sizeof(int); // calculate memory size needed

    // allocate device memory for a, b, c
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_c, size);

    // allocate host memory for a, b, c
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);

    // assign values to host a, b
    assign_ints(a, N);
    assign_ints(b, N);

    // assign values to device a, b
    cudaMemcpy(dev_a, a, size, cudaMemcpyKind::cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyKind::cudaMemcpyHostToDevice);

    // launch kernel for parallel addition using "N blocks" and "one thread per block"
    parallel_add_with_blocks_kernel<<<N, 1>>>(dev_c, dev_a, dev_b);

    // launch kernel for parallel addition using "one block" and "N threads per block"
    parallel_add_with_threads_kernel<<<1, N>>>(dev_c, dev_a, dev_b);

    // launch N parallel kernels to compute the vector addition
    // using "N/THREADS_PER_BLOCK" blocks and "THREADS_PER_BLOCK" threads
    parallel_add_threads_blocks_kernel<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(dev_c, dev_a, dev_b);

    // copy result from device to host memory
    cudaMemcpy(c, dev_c, size, cudaMemcpyKind::cudaMemcpyDeviceToHost);

    // de-allocate host and device memory
    free(a);
    free(b);
    free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    return 0;
}

// assign sequential ints to an array arr of the given size
void assign_ints(int *arr, unsigned int size) {
    for (int i = 0; i < size; i++) {
        arr[i] = i;
    }
}
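One caveat in the pair above: with N = 512 and THREADS_PER_BLOCK = 216, the integer division N/THREADS_PER_BLOCK launches only 2 blocks (432 threads), so the third kernel never touches the last 80 elements, and the one-block variant only works because N does not exceed the 1024-threads-per-block limit. The usual remedy is ceil division plus an in-kernel bounds guard; a minimal sketch with illustrative names:

#include <cuda_runtime.h>

// Guarded element-wise addition: safe for any n, including n that is not a
// multiple of the block size, because the extra threads simply return.
__global__ void add_guarded(int* c, const int* a, const int* b, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}

void launch_add(int* d_c, const int* d_a, const int* d_b, int n) {
    const int threads = 216;                        // any block size <= 1024 works
    const int blocks = (n + threads - 1) / threads; // ceil(n / threads)
    add_guarded<<<blocks, threads>>>(d_c, d_a, d_b, n);
}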
6c4662403f0a0b79c5b9a9a1b4e5c3259337cf0b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */
#include<bits/stdc++.h>
using namespace std;

#define pi (2.0*acos(0.0))
#define eps 1e-6
#define ll long long
#define inf (1<<29)
#define vi vector<int>
#define vll vector<ll>
#define sc(x) scanf("%d",&x)
#define scl(x) scanf("%lld",&x)
#define all(v) v.begin() , v.end()
#define me(a,val) memset( a , val ,sizeof(a) )
#define pb(x) push_back(x)
#define pii pair<int,int>
#define mp(a,b) make_pair(a,b)
#define Q(x) (x) * (x)
#define L(x) ((x<<1) + 1)
#define R(x) ((x<<1) + 2)
#define M(x,y) ((x+y)>>1)
#define fi first
#define se second
#define MOD 1000000007
#define ios ios::sync_with_stdio(0)

typedef struct {
    int width;
    int height;
    float* elements;
} Matrix;

// Thread block size: dimBlock is BLOCK_SIZE x BLOCK_SIZE, so values above 32
// would exceed the 1024-threads-per-block limit and the launch would fail.
#define BLOCK_SIZE 32
#define N 2048

// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void print(Matrix A){
    for(int i = 0 ; i < 10 ; i++){
        for(int j = 0 ; j < 10 ; j++)
            printf("%.0lf ",A.elements[ i* N + j ]);
        printf("\n");
    }
}

void MatMul(const Matrix A, const Matrix B, Matrix C){
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    hipMalloc(&d_A.elements, size);
    hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    hipMalloc(&d_B.elements, size);
    hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);

    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    hipMalloc(&d_C.elements, size);

    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);

    // Read C from device memory
    hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
    print(C);

    // Free device memory
    hipFree(d_A.elements);
    hipFree(d_B.elements);
    hipFree(d_C.elements);
}

// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){
    // Each thread computes one element of C
    // by accumulating results into Cvalue
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}

int main( void )
{
    Matrix A , B , C;
    A.width = B.width = C.width = N;
    A.height = B.height = C.height = N;
    A.elements = (float *)malloc( N * N * sizeof(float) );
    B.elements = (float *)malloc( N * N * sizeof(float) );
    C.elements = (float *)malloc( N * N * sizeof(float) );

    for(int i = 0 ; i < N ;i++)
        for(int j = 0 ; j < N ; j++)
            A.elements[i*N + j] = (i==j) , B.elements[i*N + j] = (i==j);

    /*for(int i = 0 ; i < N ;i++)
        for(int j = 0 ; j < N ; j++){
            float r = 0;
            for(int k = 0 ; k < N ; k++)
                r += A.elements[i*N + k] * B.elements[k*N + j];
            C.elements[i*N + j] = r;
        }
    */
    MatMul( A , B , C );
}
6c4662403f0a0b79c5b9a9a1b4e5c3259337cf0b.cu
/*
 * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */
#include<bits/stdc++.h>
using namespace std;

#define pi (2.0*acos(0.0))
#define eps 1e-6
#define ll long long
#define inf (1<<29)
#define vi vector<int>
#define vll vector<ll>
#define sc(x) scanf("%d",&x)
#define scl(x) scanf("%lld",&x)
#define all(v) v.begin() , v.end()
#define me(a,val) memset( a , val ,sizeof(a) )
#define pb(x) push_back(x)
#define pii pair<int,int>
#define mp(a,b) make_pair(a,b)
#define Q(x) (x) * (x)
#define L(x) ((x<<1) + 1)
#define R(x) ((x<<1) + 2)
#define M(x,y) ((x+y)>>1)
#define fi first
#define se second
#define MOD 1000000007
#define ios ios::sync_with_stdio(0)

typedef struct {
    int width;
    int height;
    float* elements;
} Matrix;

// Thread block size: dimBlock is BLOCK_SIZE x BLOCK_SIZE, so values above 32
// would exceed the 1024-threads-per-block limit and the launch would fail.
#define BLOCK_SIZE 32
#define N 2048

// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void print(Matrix A){
    for(int i = 0 ; i < 10 ; i++){
        for(int j = 0 ; j < 10 ; j++)
            printf("%.0lf ",A.elements[ i* N + j ]);
        printf("\n");
    }
}

void MatMul(const Matrix A, const Matrix B, Matrix C){
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);

    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);

    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);

    // Read C from device memory
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    print(C);

    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){
    // Each thread computes one element of C
    // by accumulating results into Cvalue
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}

int main( void )
{
    Matrix A , B , C;
    A.width = B.width = C.width = N;
    A.height = B.height = C.height = N;
    A.elements = (float *)malloc( N * N * sizeof(float) );
    B.elements = (float *)malloc( N * N * sizeof(float) );
    C.elements = (float *)malloc( N * N * sizeof(float) );

    for(int i = 0 ; i < N ;i++)
        for(int j = 0 ; j < N ; j++)
            A.elements[i*N + j] = (i==j) , B.elements[i*N + j] = (i==j);

    /*for(int i = 0 ; i < N ;i++)
        for(int j = 0 ; j < N ; j++){
            float r = 0;
            for(int k = 0 ; k < N ; k++)
                r += A.elements[i*N + k] * B.elements[k*N + j];
            C.elements[i*N + j] = r;
        }
    */
    MatMul( A , B , C );
}
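Neither file in this pair checks the result of the allocations, the copies, or the kernel launch, so a bad launch configuration would only show up as garbage in the printed output. The sketch below shows one way the HIP version could verify the launch inside MatMul; the CHECK macro is an illustrative addition rather than part of either file, and the CUDA version would use the same pattern with cudaGetLastError and cudaDeviceSynchronize.

// Hypothetical error-checking helper (not part of the original pair).
#define CHECK(call)                                                   \
    do {                                                              \
        hipError_t err_ = (call);                                     \
        if (err_ != hipSuccess) {                                     \
            fprintf(stderr, "HIP error %s at %s:%d\n",                \
                    hipGetErrorString(err_), __FILE__, __LINE__);     \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Inside MatMul(), immediately after the launch:
//   hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
//   CHECK(hipGetLastError());        // reports an invalid launch configuration
//   CHECK(hipDeviceSynchronize());   // reports errors raised while the kernel ran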
51e968f8d07d79176c9559c91b595483265a0116.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

hipError_t calcCuda(float *output, const uint8_t *input, const size_t *size);

//********************************************************************************************\\

static hipDeviceProp_t deviceProperties_;

__global__ void computeMI_Kernel(float *MIs, uint8_t *input, int rowCount, int colCount, int *countNZ, int offset)
{
    // One thread per unordered row pair; 'offset' shifts the global index so
    // the pair space can be covered by several launches.
    int i = threadIdx.x + blockIdx.x * blockDim.x + offset;
    if (i > rowCount*(rowCount-1)/2)
        return;

    int joints[2][2] = { 0 };
    int countNZA, countNZB, a, b, j, k;
    float joint;
    uint8_t *inputA = 0, *inputB = 0;

    MIs += i;

    // Convert the linear pair index into the pair's row indices (i, j) with i > j.
    a = 1; b = 0;
    for (j = rowCount - 1; j > 1; j--)
    {
        if (i < j)
            break;
        a++; b++;
        i -= j;
    }
    j = b;
    i += a;
    // MIs += i + j*rowCount;
    *MIs = 0;
    // *MIs = i * 1000 + j;
    // for (j = 0; j < i; j++, MIs += colCount)
    {
        inputA = input + i;
        inputB = input + j;
        countNZA = countNZ[i];
        countNZB = countNZ[j];
        // Joint 2x2 histogram of the two binary rows (input is stored column-major).
        for (k = 0; k < colCount; k++, inputA += rowCount, inputB += rowCount)
        {
            joints[*inputA][*inputB]++;
        }
        // Mutual information from the joint counts and the per-row nonzero counts.
        for (k = 0; k < 4; k++)
        {
            a = k % 2;
            b = k / 2;
            joint = joints[a][b];
            if (joint == 0)
                continue;
            joint /= colCount;
            if (a) a = countNZA;
            else a = colCount - countNZA;
            if (b) b = countNZB;
            else b = colCount - countNZB;
            *MIs += joint * log2f(joint / ((float)a / colCount) / ((float)b / colCount));
        }
    }
    /*
    size_t i, t_count, b_count;
    t_count = rowCount > deviceProperties_.maxThreadsPerBlock ? deviceProperties_.maxThreadsPerBlock : rowCount;
    b_count = rowCount / deviceProperties_.maxThreadsPerBlock + 1;
    computeMI_Kernel << <b_count, t_count >> > (MIs, input, i, rowCount, colCount, countNZ)
    */
}
//********************************************************************************************\\
51e968f8d07d79176c9559c91b595483265a0116.cu
#include "includes.h"

cudaError_t calcCuda(float *output, const uint8_t *input, const size_t *size);

//********************************************************************************************\\

static cudaDeviceProp deviceProperties_;

__global__ void computeMI_Kernel(float *MIs, uint8_t *input, int rowCount, int colCount, int *countNZ, int offset)
{
    // One thread per unordered row pair; 'offset' shifts the global index so
    // the pair space can be covered by several launches.
    int i = threadIdx.x + blockIdx.x * blockDim.x + offset;
    if (i > rowCount*(rowCount-1)/2)
        return;

    int joints[2][2] = { 0 };
    int countNZA, countNZB, a, b, j, k;
    float joint;
    uint8_t *inputA = 0, *inputB = 0;

    MIs += i;

    // Convert the linear pair index into the pair's row indices (i, j) with i > j.
    a = 1; b = 0;
    for (j = rowCount - 1; j > 1; j--)
    {
        if (i < j)
            break;
        a++; b++;
        i -= j;
    }
    j = b;
    i += a;
    // MIs += i + j*rowCount;
    *MIs = 0;
    // *MIs = i * 1000 + j;
    // for (j = 0; j < i; j++, MIs += colCount)
    {
        inputA = input + i;
        inputB = input + j;
        countNZA = countNZ[i];
        countNZB = countNZ[j];
        // Joint 2x2 histogram of the two binary rows (input is stored column-major).
        for (k = 0; k < colCount; k++, inputA += rowCount, inputB += rowCount)
        {
            joints[*inputA][*inputB]++;
        }
        // Mutual information from the joint counts and the per-row nonzero counts.
        for (k = 0; k < 4; k++)
        {
            a = k % 2;
            b = k / 2;
            joint = joints[a][b];
            if (joint == 0)
                continue;
            joint /= colCount;
            if (a) a = countNZA;
            else a = colCount - countNZA;
            if (b) b = countNZB;
            else b = colCount - countNZB;
            *MIs += joint * log2f(joint / ((float)a / colCount) / ((float)b / colCount));
        }
    }
    /*
    size_t i, t_count, b_count;
    t_count = rowCount > deviceProperties_.maxThreadsPerBlock ? deviceProperties_.maxThreadsPerBlock : rowCount;
    b_count = rowCount / deviceProperties_.maxThreadsPerBlock + 1;
    computeMI_Kernel << <b_count, t_count >> > (MIs, input, i, rowCount, colCount, countNZ)
    */
}
//********************************************************************************************\\
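The pair above only defines the kernel; the launch shown in the trailing comment predates the offset parameter and passes six arguments instead of seven. Below is a rough host-side sketch of how the kernel might be driven over all unordered row pairs. It assumes d_MIs, d_input and d_countNZ are device buffers of the right sizes, and the 65535-block chunking (the apparent reason for the offset argument) is an assumption rather than something stated in the files; the CUDA version would differ only in the launch syntax.

// Illustrative launcher, assuming the kernel and the HIP runtime are visible here.
void launchComputeMI(float *d_MIs, uint8_t *d_input, int rowCount, int colCount, int *d_countNZ)
{
    int pairCount = rowCount * (rowCount - 1) / 2;   // one thread per unordered row pair
    int threads   = 256;
    int maxBlocks = 65535;                           // chunk the 1-D grid, hence the 'offset' argument
    for (int offset = 0; offset < pairCount; offset += threads * maxBlocks) {
        int blocks = (pairCount - offset + threads - 1) / threads;
        if (blocks > maxBlocks) blocks = maxBlocks;
        hipLaunchKernelGGL(( computeMI_Kernel), dim3(blocks), dim3(threads), 0, 0,
                           d_MIs, d_input, rowCount, colCount, d_countNZ, offset);
    }
    hipDeviceSynchronize();
}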
1c3a73cc402139fbdf3e57480eba487c9ab300af.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
#include "GridPlot.h"

void MakeGridPlot(GridPlot **out, int zoom, int imageWidth, int xStart, int yStart)
{
    GridPlot *gridPlot = (GridPlot *)malloc(sizeof(GridPlot));
    gridPlot->zoom = zoom;
    gridPlot->imageWidth = imageWidth;
    gridPlot->xStart = xStart;
    gridPlot->yStart = yStart;
    *out = gridPlot;
}

__device__ unsigned char colorValue(float n1, float n2, int hue)
{
    if (hue > 360) hue -= 360;
    else if (hue < 0) hue += 360;

    if (hue < 60)
        return (unsigned char)(255 * (n1 + (n2 - n1)*hue / 60));
    if (hue < 180)
        return (unsigned char)(255 * n2);
    if (hue < 240)
        return (unsigned char)(255 * (n1 + (n2 - n1)*(240 - hue) / 60));
    return (unsigned char)(255 * n1);
}
1c3a73cc402139fbdf3e57480eba487c9ab300af.cu
#include <stdlib.h>
#include <stdio.h>
#include <curand_kernel.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
#include "GridPlot.h"

void MakeGridPlot(GridPlot **out, int zoom, int imageWidth, int xStart, int yStart)
{
    GridPlot *gridPlot = (GridPlot *)malloc(sizeof(GridPlot));
    gridPlot->zoom = zoom;
    gridPlot->imageWidth = imageWidth;
    gridPlot->xStart = xStart;
    gridPlot->yStart = yStart;
    *out = gridPlot;
}

__device__ unsigned char colorValue(float n1, float n2, int hue)
{
    if (hue > 360) hue -= 360;
    else if (hue < 0) hue += 360;

    if (hue < 60)
        return (unsigned char)(255 * (n1 + (n2 - n1)*hue / 60));
    if (hue < 180)
        return (unsigned char)(255 * n2);
    if (hue < 240)
        return (unsigned char)(255 * (n1 + (n2 - n1)*(240 - hue) / 60));
    return (unsigned char)(255 * n1);
}
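colorValue in both files is the usual hue-to-channel helper from an HSL-to-RGB conversion, where n1 and n2 are the two intermediates derived from lightness and saturation. The sketch below shows how such a helper is commonly driven to fill one RGBA pixel; floatToColor, the fixed saturation, and the value-to-hue mapping are illustrative assumptions, not code taken from these files.

// Illustrative device helper; assumes colorValue is visible in the same translation unit.
__device__ void floatToColor(unsigned char *pixel, float value)   // value expected in [0,1]
{
    float s = 1.0f;                                  // full saturation
    float l = 0.5f * value;                          // lightness scaled by the input value
    float n2 = (l <= 0.5f) ? l * (1.0f + s) : l + s - l * s;
    float n1 = 2.0f * l - n2;
    int hue = (int)(240.0f * (1.0f - value));        // map the value onto a blue-to-red hue range
    pixel[0] = colorValue(n1, n2, hue + 120);        // R
    pixel[1] = colorValue(n1, n2, hue);              // G
    pixel[2] = colorValue(n1, n2, hue - 120);        // B
    pixel[3] = 255;                                  // A
}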
f84932b831d81f8d8637a9f7b1392ef309c0fe02.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "vars.h" //TIMERS & COUNTERS extern int numproj; extern double ptime; extern double pktime; extern double pcstime; extern double pcntime; extern double pcrtime; extern double pchtime; extern double pmtime; extern double prtime; extern int numback; extern double btime; extern double bktime; extern double bcstime; extern double bcntime; extern double bcrtime; extern double bchtime; extern double bmtime; extern double brtime; extern int raynuminc; extern int raynumout; extern int mynumray; extern int mynumpix; extern int batchsize; extern int *raysendstart; extern int *rayrecvstart; extern int *raysendcount; extern int *rayrecvcount; extern int *rayraystart; extern int *rayrayind; extern int *rayrecvlist; extern int proj_blocksize; extern int proj_numblocks; extern int proj_numbufftot; extern int *proj_buffdispl; extern int proj_buffsize; extern int proj_mapnztot; extern int *proj_mapdispl; extern int *proj_mapnz; extern int *proj_buffmap; extern int proj_warpnztot; extern int *proj_warpdispl; extern int back_blocksize; extern int back_numblocks; extern int back_numbufftot; extern int *back_buffdispl; extern int back_buffsize; extern int back_mapnztot; extern int *back_mapdispl; extern int *back_mapnz; extern int *back_buffmap; extern int back_warpnztot; extern int *back_warpdispl; #ifdef MATRIX extern matrix *proj_warpindval; extern matrix *back_warpindval; #else extern unsigned short *proj_warpindex; extern MATPREC *proj_warpvalue; extern unsigned short *back_warpindex; extern MATPREC *back_warpvalue; #endif int *proj_buffdispl_d; int *proj_mapdispl_d; int *proj_mapnz_d; int *proj_buffmap_d; int *proj_warpdispl_d; int *back_buffdispl_d; int *back_mapdispl_d; int *back_mapnz_d; int *back_buffmap_d; int *back_warpdispl_d; #ifdef MATRIX matrix *proj_warpindval_d; matrix *back_warpindval_d; #else unsigned short *proj_warpindex_d; MATPREC *proj_warpvalue_d; unsigned short *back_warpindex_d; MATPREC *back_warpvalue_d; #endif int *rayraystart_d; int *rayrayind_d; int *rayindray_d; extern int socketrayout; extern int socketrayinc; extern int *socketreduceout; extern int *socketreduceinc; extern int *socketreduceoutdispl; extern int *socketreduceincdispl; extern int *socketsendcomm; extern int *socketrecvcomm; extern int *socketsendcommdispl; extern int *socketrecvcommdispl; extern int *socketsendmap; extern int *socketreducedispl; extern int *socketreduceindex; extern int *socketraydispl; extern int *socketrayindex; extern int *socketpackmap; extern int *socketunpackmap; extern int noderayout; extern int noderayinc; extern int *nodereduceout; extern int *nodereduceinc; extern int *nodereduceoutdispl; extern int *nodereduceincdispl; extern int *nodesendcomm; extern int *noderecvcomm; extern int *nodesendcommdispl; extern int *noderecvcommdispl; extern int *nodesendmap; extern int *nodereducedispl; extern int *nodereduceindex; extern int *noderaydispl; extern int *noderayindex; extern int *nodepackmap; extern int *nodeunpackmap; extern int *raypackmap; extern int *rayunpackmap; extern int numthreads; extern int numproc; extern int myid; extern MPI_Comm MPI_COMM_BATCH; extern int numproc_batch; extern int myid_batch; extern MPI_Comm MPI_COMM_DATA; extern int numproc_data; extern int myid_data; extern MPI_Comm MPI_COMM_NODE; extern int numproc_node; extern int myid_node; extern int numnode; extern MPI_Comm MPI_COMM_SOCKET; extern int numproc_socket; extern int myid_socket; extern int numsocket; int 
*socketpackmap_d; int *socketunpackmap_d; int *socketreducedispl_d; int *socketreduceindex_d; int *nodepackmap_d; int *nodeunpackmap_d; int *nodereducedispl_d; int *nodereduceindex_d; int *raypackmap_d; int *rayunpackmap_d; int *noderaydispl_d; int *noderayindex_d; VECPREC *tomobuff_d; VECPREC *partbuff_d; COMMPREC *socketreducesendbuff_d; COMMPREC *socketreducerecvbuff_d; COMMPREC *nodereducesendbuff_d; COMMPREC *nodereducerecvbuff_d; COMMPREC *nodesendbuff_d; COMMPREC *noderecvbuff_d; COMMPREC *nodesendbuff_h; COMMPREC *noderecvbuff_h; extern int *socketrecvbuffdispl_p; extern COMMPREC **socketrecvbuff_p; extern int *socketrecvdevice_p; extern int *noderecvbuffdispl_p; extern COMMPREC **noderecvbuff_p; extern int *noderecvdevice_p; #ifdef MATRIX __global__ void kernel_project __launch_bounds__(1024,1) (VECPREC *y, VECPREC *x, matrix *indval, int numrow, int numcol, int *buffdispl, int *displ, int *mapdispl, int *mapnz, int *buffmap, int buffsize){ #else __global__ void kernel_project __launch_bounds__(1024,1) (VECPREC *y, VECPREC *x, unsigned short *index, MATPREC *value, int numrow, int numcol, int *buffdispl, int *displ, int *mapdispl, int *mapnz, int *buffmap, int buffsize){ #endif extern __shared__ VECPREC shared[]; #ifdef MIXED float acc[FFACTOR] = {0.0}; #else VECPREC acc[FFACTOR] = {0.0}; #endif int wind = threadIdx.x%WARPSIZE; for(int buff = buffdispl[blockIdx.x]; buff < buffdispl[blockIdx.x+1]; buff++){ int mapoffset = mapdispl[buff]; for(int i = threadIdx.x; i < mapnz[buff]; i += blockDim.x){ int ind = buffmap[mapoffset+i]; #pragma unroll for(int f = 0; f < FFACTOR; f++) shared[f*buffsize+i] = x[f*numcol+ind]; } __syncthreads(); int warp = (buff*blockDim.x+threadIdx.x)/WARPSIZE; for(int n = displ[warp]; n < displ[warp+1]; n++){ #ifdef MATRIX matrix mat = indval[n*(long)WARPSIZE+wind]; #ifdef MIXED float val = mat.val; #pragma unroll for(int f = 0; f < FFACTOR; f++) acc[f] += __half2float(shared[f*buffsize+mat.ind])*val; #else for(int f = 0; f < FFACTOR; f++) acc[f] += shared[f*buffsize+mat.ind]*mat.val; #endif #else unsigned short ind = index[n*(long)WARPSIZE+wind]; MATPREC val = value[n*(long)WARPSIZE+wind]; #pragma unroll for(int f = 0; f < FFACTOR; f++) acc[f] += shared[f*buffsize+ind]*val; #endif } __syncthreads(); } int row = blockIdx.x*blockDim.x+threadIdx.x; if(row < numrow) for(int f = 0; f < FFACTOR; f++) y[f*numrow+row] = acc[f]; }; __global__ void kernel_reduce(COMMPREC*,COMMPREC*,int*,int*,int,int,int*,int*); __global__ void kernel_reducenopack(double*,COMMPREC*,int*,int*,int,int,int*,double); __global__ void kernel_scatternopack(double*,COMMPREC*,int*,int*,int,int,int*,double); __global__ void kernel_scatter(COMMPREC*,COMMPREC*,int*,int*,int,int,int*,int*); __global__ void kernel_double2VECPREC(VECPREC*,double*,int,double); __global__ void kernel_VECPREC2double(double*,VECPREC*,int,double); __global__ void kernel_VECPREC2COMMPREC(COMMPREC*,VECPREC*,int,int*); __global__ void kernel_COMMPREC2VECPREC(VECPREC*,COMMPREC*,int,int*); void partial_project(); void partial_backproject(); double *reducebuff_d; double *reducebuff_h; int numdevice; int mydevice; hipEvent_t start,stop; float milliseconds; MPI_Request *sendrequest; MPI_Request *recvrequest; hipStream_t *socketstream; hipStream_t *nodestream; void setup_gpu(double **obj_d, double **gra_d, double **dir_d, double **res_d, double **ray_d, double **obj_h, double **res_h){ hipGetDeviceCount(&numdevice); mydevice = myid%numdevice; hipSetDevice(mydevice); if(myid==0){ int deviceCount; hipGetDeviceCount(&deviceCount); 
printf("\n"); printf("Device Count: %d\n",deviceCount); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp,0); printf("Device %d name: %s\n",0,deviceProp.name); printf("Clock Frequency: %f GHz\n",deviceProp.clockRate/1.e9); printf("Computational Capabilities: %d, %d\n",deviceProp.major,deviceProp.minor); printf("Maximum global memory size: %lu\n",deviceProp.totalGlobalMem); printf("Maximum constant memory size: %lu\n",deviceProp.totalConstMem); printf("Maximum shared memory size per block: %lu\n",deviceProp.sharedMemPerBlock); printf("Maximum block dimensions: %dx%dx%d\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]); printf("Maximum grid dimensions: %dx%dx%d\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]); printf("Maximum threads per block: %d\n",deviceProp.maxThreadsPerBlock); printf("Warp size: %d\n",deviceProp.warpSize); printf("32-bit Reg. per block: %d\n",deviceProp.regsPerBlock); printf("\n"); } //CONJUGATE-GRADIENT BUFFERS double batchmem = 0.0; batchmem += sizeof(double)*mynumpix*batchsize/1.0e9; if(mynumpix > mynumray) batchmem += sizeof(double)*mynumpix*batchsize/1.0e9; else batchmem += sizeof(double)*mynumray*batchsize/1.0e9; batchmem += sizeof(double)*mynumpix*batchsize/1.0e9; batchmem += sizeof(double)*mynumray*batchsize/1.0e9; hipMalloc((void**)obj_d,sizeof(double)*mynumpix*batchsize); if(mynumpix > mynumray) hipMalloc((void**)gra_d,sizeof(double)*mynumpix*batchsize); else hipMalloc((void**)gra_d,sizeof(double)*mynumray*batchsize); hipMalloc((void**)dir_d,sizeof(double)*mynumpix*batchsize); hipMalloc((void**)res_d,sizeof(double)*mynumray*batchsize); *ray_d = *gra_d; hipHostMalloc((void**)obj_h,sizeof(double)*mynumpix*batchsize); hipHostMalloc((void**)res_h,sizeof(double)*mynumray*batchsize); //REDUCTION BUFFERS int reducebuffsize = 0; if(mynumpix > mynumray) reducebuffsize = (mynumpix*batchsize+255)/256; else reducebuffsize = (mynumray*batchsize+255)/256; if(myid==0)printf("reducebuffsize: %d\n",reducebuffsize); hipMalloc((void**)&reducebuff_d,sizeof(double)*reducebuffsize); hipHostMalloc((void**)&reducebuff_h,sizeof(double)*reducebuffsize); double projmem = 0.0; projmem = projmem + sizeof(int)/1.0e9*(proj_numblocks+1); projmem = projmem + sizeof(int)/1.0e9*(proj_numbufftot+1); projmem = projmem + sizeof(int)/1.0e9*proj_numbufftot; projmem = projmem + sizeof(int)/1.0e9*proj_mapnztot; projmem = projmem + sizeof(int)/1.0e9*(proj_numbufftot*(proj_blocksize/WARPSIZE)+1); projmem = projmem + sizeof(unsigned short)/1.0e9*(proj_warpnztot*(long)WARPSIZE); projmem = projmem + sizeof(MATPREC)/1.0e9*(proj_warpnztot*(long)WARPSIZE); projmem = projmem + sizeof(int)/1.0e9*proj_mapnztot; //printf("PROC %d FORWARD PROJECTION MEMORY: %f GB\n",myid,projmem); double backmem = 0.0; backmem = backmem + sizeof(int)/1.0e9*(back_numblocks+1); backmem = backmem + sizeof(int)/1.0e9*(back_numbufftot+1); backmem = backmem + sizeof(int)/1.0e9*back_numbufftot; backmem = backmem + sizeof(int)/1.0e9*back_mapnztot; backmem = backmem + sizeof(int)/1.0e9*(back_numbufftot*(back_blocksize/WARPSIZE)+1); backmem = backmem + sizeof(unsigned short)/1.0e9*(back_warpnztot*(long)WARPSIZE); backmem = backmem + sizeof(MATPREC)/1.0e9*(back_warpnztot*(long)WARPSIZE); backmem = backmem + sizeof(int)/1.0e9*back_mapnztot; //printf("PROC %d BACKPROJECTION MEMORY: %f GB\n",myid,backmem); hipMalloc((void**)&proj_buffdispl_d,sizeof(int)*(proj_numblocks+1)); hipMalloc((void**)&proj_mapdispl_d,sizeof(int)*(proj_numbufftot+1)); 
hipMalloc((void**)&proj_mapnz_d,sizeof(int)*proj_numbufftot); hipMalloc((void**)&proj_buffmap_d,sizeof(int)*proj_mapnztot); hipMalloc((void**)&proj_warpdispl_d,sizeof(int)*(proj_numbufftot*(proj_blocksize/WARPSIZE)+1)); hipMemcpy(proj_buffdispl_d,proj_buffdispl,sizeof(int)*(proj_numblocks+1),hipMemcpyHostToDevice); hipMemcpy(proj_mapdispl_d,proj_mapdispl,sizeof(int)*(proj_numbufftot+1),hipMemcpyHostToDevice); hipMemcpy(proj_mapnz_d,proj_mapnz,sizeof(int)*proj_numbufftot,hipMemcpyHostToDevice); hipMemcpy(proj_buffmap_d,proj_buffmap,sizeof(int)*proj_mapnztot,hipMemcpyHostToDevice); hipMemcpy(proj_warpdispl_d,proj_warpdispl,sizeof(int)*(proj_numbufftot*(proj_blocksize/WARPSIZE)+1),hipMemcpyHostToDevice); delete[] proj_buffdispl; delete[] proj_mapdispl; delete[] proj_mapnz; delete[] proj_buffmap; delete[] proj_warpdispl; hipMalloc((void**)&back_buffdispl_d,sizeof(int)*(back_numblocks+1)); hipMalloc((void**)&back_mapdispl_d,sizeof(int)*(back_numbufftot+1)); hipMalloc((void**)&back_mapnz_d,sizeof(int)*back_numbufftot); hipMalloc((void**)&back_buffmap_d,sizeof(int)*back_mapnztot); hipMalloc((void**)&back_warpdispl_d,sizeof(int)*(back_numbufftot*(back_blocksize/WARPSIZE)+1)); hipMemcpy(back_buffdispl_d,back_buffdispl,sizeof(int)*(back_numblocks+1),hipMemcpyHostToDevice); hipMemcpy(back_mapdispl_d,back_mapdispl,sizeof(int)*(back_numbufftot+1),hipMemcpyHostToDevice); hipMemcpy(back_mapnz_d,back_mapnz,sizeof(int)*back_numbufftot,hipMemcpyHostToDevice); hipMemcpy(back_buffmap_d,back_buffmap,sizeof(int)*back_mapnztot,hipMemcpyHostToDevice); hipMemcpy(back_warpdispl_d,back_warpdispl,sizeof(int)*(back_numbufftot*(back_blocksize/WARPSIZE)+1),hipMemcpyHostToDevice); delete[] back_buffdispl; delete[] back_mapdispl; delete[] back_mapnz; delete[] back_buffmap; delete[] back_warpdispl; #ifdef MATRIX hipMalloc((void**)&proj_warpindval_d,sizeof(matrix)*proj_warpnztot*(long)WARPSIZE); hipMalloc((void**)&back_warpindval_d,sizeof(matrix)*back_warpnztot*(long)WARPSIZE); hipMemcpy(proj_warpindval_d,proj_warpindval,sizeof(matrix)*proj_warpnztot*(long)WARPSIZE,hipMemcpyHostToDevice); hipMemcpy(back_warpindval_d,back_warpindval,sizeof(matrix)*back_warpnztot*(long)WARPSIZE,hipMemcpyHostToDevice); delete[] proj_warpindval; delete[] back_warpindval; #else hipMalloc((void**)&proj_warpindex_d,sizeof(unsigned short)*proj_warpnztot*(long)WARPSIZE); hipMalloc((void**)&proj_warpvalue_d,sizeof(MATPREC)*proj_warpnztot*(long)WARPSIZE); hipMalloc((void**)&back_warpindex_d,sizeof(unsigned short)*back_warpnztot*(long)WARPSIZE); hipMalloc((void**)&back_warpvalue_d,sizeof(MATPREC)*back_warpnztot*(long)WARPSIZE); hipMemcpy(proj_warpindex_d,proj_warpindex,sizeof(unsigned short)*proj_warpnztot*(long)WARPSIZE,hipMemcpyHostToDevice); hipMemcpy(proj_warpvalue_d,proj_warpvalue,sizeof(MATPREC)*proj_warpnztot*(long)WARPSIZE,hipMemcpyHostToDevice); hipMemcpy(back_warpindex_d,back_warpindex,sizeof(unsigned short)*back_warpnztot*(long)WARPSIZE,hipMemcpyHostToDevice); hipMemcpy(back_warpvalue_d,back_warpvalue,sizeof(MATPREC)*back_warpnztot*(long)WARPSIZE,hipMemcpyHostToDevice); delete[] proj_warpindex; delete[] proj_warpvalue; delete[] back_warpindex; delete[] back_warpvalue; #endif //COMMUNICATION BUFFERS double commem = 0.0; commem += sizeof(VECPREC)*mynumpix*FFACTOR/1.0e9; commem += sizeof(VECPREC)*raynumout*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*socketsendcommdispl[numproc_socket]*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*socketrecvcommdispl[numproc_socket]*FFACTOR/1.0e9; commem += 
sizeof(COMMPREC)*nodesendcommdispl[numproc_node]*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*noderecvcommdispl[numproc_node]*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR/1.0e9; hipMalloc((void**)&tomobuff_d,sizeof(VECPREC)*mynumpix*FFACTOR); hipMalloc((void**)&partbuff_d,sizeof(VECPREC)*raynumout*FFACTOR); hipMalloc((void**)&socketreducesendbuff_d,sizeof(COMMPREC)*socketsendcommdispl[numproc_socket]*FFACTOR); hipMalloc((void**)&socketreducerecvbuff_d,sizeof(COMMPREC)*socketrecvcommdispl[numproc_socket]*FFACTOR); hipMalloc((void**)&nodereducesendbuff_d,sizeof(COMMPREC)*nodesendcommdispl[numproc_node]*FFACTOR); hipMalloc((void**)&nodereducerecvbuff_d,sizeof(COMMPREC)*noderecvcommdispl[numproc_node]*FFACTOR); hipMalloc((void**)&nodesendbuff_d,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR); hipMalloc((void**)&noderecvbuff_d,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR); //HOST BUFFER hipHostMalloc((void**)&nodesendbuff_h,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR); hipHostMalloc((void**)&noderecvbuff_h,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR); //PACK AND UNPACK MAPS commem += sizeof(int)*socketsendcommdispl[numproc_socket]*FFACTOR/1.0e9; commem += sizeof(int)*socketrecvcommdispl[numproc_socket]*FFACTOR/1.0e9; commem += sizeof(int)*(socketreduceoutdispl[numproc_data]+1)/1.0e9; commem += sizeof(int)*socketreducedispl[socketreduceoutdispl[numproc_data]]/1.0e9; commem += sizeof(int)*socketreduceoutdispl[numproc_data]*FFACTOR/1.0e9; commem += sizeof(int)*noderecvcommdispl[numproc_node]*FFACTOR/1.0e9; commem += sizeof(int)*(nodereduceoutdispl[numproc_data]+1)/1.0e9; commem += sizeof(int)*nodereducedispl[nodereduceoutdispl[numproc_data]]/1.0e9; commem += sizeof(int)*nodereduceoutdispl[numproc_data]*FFACTOR/1.0e9; commem += sizeof(int)*nodereduceincdispl[numproc_data]*FFACTOR/1.0e9; commem += sizeof(int)*(mynumray+1)/1.0e9; commem += sizeof(int)*noderaydispl[mynumray]/1.0e9; hipMalloc((void**)&socketpackmap_d,sizeof(int)*socketsendcommdispl[numproc_socket]*FFACTOR); hipMalloc((void**)&socketunpackmap_d,sizeof(int)*socketrecvcommdispl[numproc_socket]*FFACTOR); hipMalloc((void**)&socketreducedispl_d,sizeof(int)*(socketreduceoutdispl[numproc_data]+1)); hipMalloc((void**)&socketreduceindex_d,sizeof(int)*socketreducedispl[socketreduceoutdispl[numproc_data]]); hipMalloc((void**)&nodepackmap_d,sizeof(int)*socketreduceoutdispl[numproc_data]*FFACTOR); hipMalloc((void**)&nodeunpackmap_d,sizeof(int)*noderecvcommdispl[numproc_node]*FFACTOR); hipMalloc((void**)&nodereducedispl_d,sizeof(int)*(nodereduceoutdispl[numproc_data]+1)); hipMalloc((void**)&nodereduceindex_d,sizeof(int)*nodereducedispl[nodereduceoutdispl[numproc_data]]); hipMalloc((void**)&raypackmap_d,sizeof(int)*nodereduceoutdispl[numproc_data]*FFACTOR); hipMalloc((void**)&rayunpackmap_d,sizeof(int)*nodereduceincdispl[numproc_data]*FFACTOR); hipMalloc((void**)&noderaydispl_d,sizeof(int)*(mynumray+1)); hipMalloc((void**)&noderayindex_d,sizeof(int)*noderaydispl[mynumray]); hipMemcpy(socketpackmap_d,socketpackmap,sizeof(int)*socketsendcommdispl[numproc_socket]*FFACTOR,hipMemcpyHostToDevice); hipMemcpy(socketunpackmap_d,socketunpackmap,sizeof(int)*socketrecvcommdispl[numproc_socket]*FFACTOR,hipMemcpyHostToDevice); hipMemcpy(socketreducedispl_d,socketreducedispl,sizeof(int)*(socketreduceoutdispl[numproc_data]+1),hipMemcpyHostToDevice); 
hipMemcpy(socketreduceindex_d,socketreduceindex,sizeof(int)*socketreducedispl[socketreduceoutdispl[numproc_data]],hipMemcpyHostToDevice); hipMemcpy(nodepackmap_d,nodepackmap,sizeof(int)*socketreduceoutdispl[numproc_data]*FFACTOR,hipMemcpyHostToDevice); hipMemcpy(nodeunpackmap_d,nodeunpackmap,sizeof(int)*noderecvcommdispl[numproc_node]*FFACTOR,hipMemcpyHostToDevice); hipMemcpy(nodereducedispl_d,nodereducedispl,sizeof(int)*(nodereduceoutdispl[numproc_data]+1),hipMemcpyHostToDevice); hipMemcpy(nodereduceindex_d,nodereduceindex,sizeof(int)*nodereducedispl[nodereduceoutdispl[numproc_data]],hipMemcpyHostToDevice); hipMemcpy(raypackmap_d,raypackmap,sizeof(int)*nodereduceoutdispl[numproc_data]*FFACTOR,hipMemcpyHostToDevice); hipMemcpy(rayunpackmap_d,rayunpackmap,sizeof(int)*nodereduceincdispl[numproc_data]*FFACTOR,hipMemcpyHostToDevice); hipMemcpy(noderaydispl_d,noderaydispl,sizeof(int)*(mynumray+1),hipMemcpyHostToDevice); hipMemcpy(noderayindex_d,noderayindex,sizeof(int)*noderaydispl[mynumray],hipMemcpyHostToDevice); double gpumem = projmem+backmem; double gpumems[numproc_data]; double batchmems[numproc_data]; double commems[numproc_data]; MPI_Allgather(&gpumem,1,MPI_DOUBLE,gpumems,1,MPI_DOUBLE,MPI_COMM_DATA); MPI_Allgather(&batchmem,1,MPI_DOUBLE,batchmems,1,MPI_DOUBLE,MPI_COMM_DATA); MPI_Allgather(&commem,1,MPI_DOUBLE,commems,1,MPI_DOUBLE,MPI_COMM_DATA); if(myid==0){ double gpumaxmem = 0.0; double batchmaxmem = 0.0; double commaxmem = 0.0; double totmaxmem = 0.0; double gputotmem = 0.0; double batchtotmem = 0.0; double commtotmem = 0.0; for(int p = 0; p < numproc_data; p++){ printf("PROC %d GPU MEMORY: %f GB + %f GB + %f GB = %f GB\n",p,gpumems[p],batchmems[p],commems[p],gpumems[p]+batchmems[p]+commems[p]); if(gpumems[p]>gpumaxmem)gpumaxmem=gpumems[p]; if(batchmems[p]>batchmaxmem)batchmaxmem=batchmems[p]; if(commems[p]>commaxmem)commaxmem=commems[p]; if(gpumems[p]+batchmems[p]+commems[p]>totmaxmem)totmaxmem=gpumems[p]+batchmems[p]+commems[p]; gputotmem += gpumems[p]; batchtotmem += batchmems[p]; commtotmem += commems[p]; } printf("MAX GPU MEMORY gpumem %f GB batchmem %f GB commem %f GB total %f GB\n",gpumaxmem,batchmaxmem,commaxmem,totmaxmem); printf("TOTAL GPU MEMORY gpumem %f GB + batchmem %f GB + commem %f GB = %f GB\n",gputotmem,batchtotmem,commtotmem,gputotmem+batchtotmem+commtotmem); } hipFuncSetAttribute(kernel_project,hipFuncAttributeMaxDynamicSharedMemorySize,(164-1)*1024); hipFuncSetAttribute(kernel_project,hipFuncAttributePreferredSharedMemoryCarveout,cudaSharedmemCarveoutMaxShared); hipFuncAttributes funcAttributes; hipFuncGetAttributes(&funcAttributes,kernel_project); if(myid==0){ printf("\n"); printf("SpMM Attributes\n"); printf("Binary Version: %d\n",funcAttributes.binaryVersion); printf("Cache Mode: %d\n",funcAttributes.cacheModeCA); printf("Constant Memory: %lu\n",funcAttributes.constSizeBytes); printf("Local Memory: %lu\n",funcAttributes.localSizeBytes); printf("Max Dynamic Shared Memory: %d\n",funcAttributes.maxDynamicSharedSizeBytes); printf("Max Threads per Block: %d\n",funcAttributes.maxThreadsPerBlock); printf("Number of Registers: %d\n",funcAttributes.numRegs); printf("Shared Memory Carveout: %d\n",funcAttributes.preferredShmemCarveout); printf("PTX Version %d\n",funcAttributes.ptxVersion); printf("Static Shared Memory: %lu\n",funcAttributes.sharedSizeBytes); printf("\n"); } hipEventCreate(&start); hipEventCreate(&stop); sendrequest = new MPI_Request[numproc_data]; recvrequest = new MPI_Request[numproc_data]; socketstream = new hipStream_t[numproc_socket]; nodestream = 
new hipStream_t[numproc_node]; for(int p = 0; p < numproc_socket; p++) hipStreamCreate(&socketstream[p]); for(int p = 0; p < numproc_node; p++) hipStreamCreate(&nodestream[p]); communications(); return; } void project(double *sino_d, double *tomo_d, double scale, int batchslice){ hipDeviceSynchronize(); MPI_Barrier(MPI_COMM_DATA); double projecttime = MPI_Wtime(); //PARTIAL PROJECTION hipLaunchKernelGGL(( kernel_double2VECPREC), dim3((mynumpix*FFACTOR+255)/256),dim3(256), 0, 0, tomobuff_d,tomo_d,mynumpix*FFACTOR,scale); partial_project(); for(int slice = 0; slice < batchslice; slice += FFACTOR){ //MEMCPY DEVICE TO HOST hipEventRecord(start); hipMemcpy(nodesendbuff_h,nodesendbuff_d,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR,hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); pmtime += milliseconds/1e3; //HOST COMMUNICATION MPI_Barrier(MPI_COMM_DATA); double chtime = MPI_Wtime(); { int sendcount = 0; int recvcount = 0; for(int p = 0; p < numproc_data; p++) if(nodereduceout[p]){ MPI_Issend(nodesendbuff_h+nodereduceoutdispl[p]*FFACTOR,nodereduceout[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,sendrequest+sendcount); sendcount++; } for(int p = 0; p < numproc_data; p++) if(nodereduceinc[p]){ MPI_Irecv(noderecvbuff_h+nodereduceincdispl[p]*FFACTOR,nodereduceinc[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,recvrequest+recvcount); recvcount++; } #ifdef OVERLAP //PARTIAL PROJECTION if(slice+FFACTOR < batchslice){ hipLaunchKernelGGL(( kernel_double2VECPREC), dim3((mynumpix*FFACTOR+255)/256),dim3(256), 0, 0, tomobuff_d,tomo_d+(slice+FFACTOR)*mynumpix,mynumpix*FFACTOR,scale); partial_project(); } #endif MPI_Waitall(sendcount,sendrequest,MPI_STATUSES_IGNORE); MPI_Waitall(recvcount,recvrequest,MPI_STATUSES_IGNORE); } MPI_Barrier(MPI_COMM_DATA); pchtime += MPI_Wtime()-chtime; //if(myid==0)printf("rack time %e\n",MPI_Wtime()-chtime); //MEMCPY HOST TO DEVICE hipEventRecord(start); hipMemcpy(noderecvbuff_d,noderecvbuff_h,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR,hipMemcpyHostToDevice); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); pmtime += milliseconds/1e3; //HOST REDUCTION hipEventRecord(start); hipLaunchKernelGGL(( kernel_reducenopack), dim3((mynumray+255)/256),dim3(256), 0, 0, sino_d+slice*mynumray,noderecvbuff_d,noderaydispl_d,noderayindex_d,mynumray,nodereduceincdispl[numproc_data],rayunpackmap_d,1.0/scale); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); prtime += milliseconds/1e3; //#endif numproj++; #ifndef OVERLAP //PARTIAL PROJECTION if(slice+FFACTOR < batchslice){ hipLaunchKernelGGL(( kernel_double2VECPREC), dim3((mynumpix*FFACTOR+255)/256),dim3(256), 0, 0, tomobuff_d,tomo_d+(slice+FFACTOR)*mynumpix,mynumpix*FFACTOR,scale); partial_project(); } #endif } hipDeviceSynchronize(); MPI_Barrier(MPI_COMM_DATA); ptime += MPI_Wtime()-projecttime; } void backproject(double *tomo_d, double *sino_d, double scale, int batchslice){ hipDeviceSynchronize(); MPI_Barrier(MPI_COMM_DATA); double backprojecttime = MPI_Wtime(); //HOST SCATTER hipEventRecord(start); hipLaunchKernelGGL(( kernel_scatternopack), dim3((mynumray+255)/256),dim3(256), 0, 0, sino_d,noderecvbuff_d,noderaydispl_d,noderayindex_d,mynumray,nodereduceincdispl[numproc_data],rayunpackmap_d,scale); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); brtime += milliseconds/1e3; //MEMCPY DEVICE TO 
HOST hipEventRecord(start); hipMemcpy(noderecvbuff_h,noderecvbuff_d,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR,hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); bmtime += milliseconds/1e3; //HOST COMMUNICATION MPI_Barrier(MPI_COMM_DATA); double chtime = MPI_Wtime(); { int sendcount = 0; int recvcount = 0; for(int p = 0; p < numproc_data; p++) if(nodereduceout[p]){ MPI_Irecv(nodesendbuff_h+nodereduceoutdispl[p]*FFACTOR,nodereduceout[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,sendrequest+sendcount); sendcount++; } for(int p = 0; p < numproc_data; p++) if(nodereduceinc[p]){ MPI_Issend(noderecvbuff_h+nodereduceincdispl[p]*FFACTOR,nodereduceinc[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,recvrequest+recvcount); recvcount++; } MPI_Waitall(sendcount,sendrequest,MPI_STATUSES_IGNORE); MPI_Waitall(recvcount,recvrequest,MPI_STATUSES_IGNORE); } MPI_Barrier(MPI_COMM_DATA); bchtime += MPI_Wtime()-chtime; //if(myid==0)printf("rack time %e\n",MPI_Wtime()-chtime); //MEMCPY HOST TO DEVICE hipEventRecord(start); hipMemcpy(nodesendbuff_d,nodesendbuff_h,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR,hipMemcpyHostToDevice); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); bmtime += milliseconds/1e3; for(int slice = 0; slice < batchslice; slice += FFACTOR){ double chtime; int sendcount = 0; int recvcount = 0; if(slice+FFACTOR < batchslice){ //HOST SCATTER hipEventRecord(start); hipLaunchKernelGGL(( kernel_scatternopack), dim3((mynumray+255)/256),dim3(256), 0, 0, sino_d+(slice+FFACTOR)*mynumray,noderecvbuff_d,noderaydispl_d,noderayindex_d,mynumray,nodereduceincdispl[numproc_data],rayunpackmap_d,scale); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); brtime += milliseconds/1e3; //MEMCPY DEVICE TO HOST hipEventRecord(start); hipMemcpy(noderecvbuff_h,noderecvbuff_d,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR,hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); bmtime += milliseconds/1e3; //HOST COMMUNICATION MPI_Barrier(MPI_COMM_DATA); chtime = MPI_Wtime(); for(int p = 0; p < numproc_data; p++) if(nodereduceout[p]){ MPI_Irecv(nodesendbuff_h+nodereduceoutdispl[p]*FFACTOR,nodereduceout[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,sendrequest+sendcount); sendcount++; } for(int p = 0; p < numproc_data; p++) if(nodereduceinc[p]){ MPI_Issend(noderecvbuff_h+nodereduceincdispl[p]*FFACTOR,nodereduceinc[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,recvrequest+recvcount); recvcount++; } } #ifdef OVERLAP //PARTIAL BACKPROJECTION partial_backproject(); hipLaunchKernelGGL(( kernel_VECPREC2double), dim3((mynumpix*FFACTOR+255)/256),dim3(256), 0, 0, tomo_d+slice*mynumpix,tomobuff_d,mynumpix*FFACTOR,1.0/scale); #endif if(slice+FFACTOR < batchslice){ MPI_Waitall(sendcount,sendrequest,MPI_STATUSES_IGNORE); MPI_Waitall(recvcount,recvrequest,MPI_STATUSES_IGNORE); MPI_Barrier(MPI_COMM_DATA); bchtime += MPI_Wtime()-chtime; //if(myid==0)printf("rack time %e\n",MPI_Wtime()-chtime); } #ifndef OVERLAP //PARTIAL BACKPROJECTION partial_backproject(); hipLaunchKernelGGL(( kernel_VECPREC2double), dim3((mynumpix*FFACTOR+255)/256),dim3(256), 0, 0, tomo_d+slice*mynumpix,tomobuff_d,mynumpix*FFACTOR,1.0/scale); #endif numback++; if(slice+FFACTOR < batchslice){ //MEMCPY HOST TO DEVICE hipEventRecord(start); 
hipMemcpy(nodesendbuff_d,nodesendbuff_h,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR,hipMemcpyHostToDevice); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); bmtime += milliseconds/1e3; } } hipDeviceSynchronize(); MPI_Barrier(MPI_COMM_DATA); btime += MPI_Wtime()-backprojecttime; } void partial_project(){ hipEventRecord(start); #ifdef MATRIX hipLaunchKernelGGL(( kernel_project), dim3(proj_numblocks),dim3(proj_blocksize),sizeof(VECPREC)*proj_buffsize*FFACTOR, 0, partbuff_d,tomobuff_d,proj_warpindval_d,raynumout,mynumpix,proj_buffdispl_d,proj_warpdispl_d,proj_mapdispl_d,proj_mapnz_d,proj_buffmap_d,proj_buffsize); #else hipLaunchKernelGGL(( kernel_project), dim3(proj_numblocks),dim3(proj_blocksize),sizeof(VECPREC)*proj_buffsize*FFACTOR, 0, partbuff_d,tomobuff_d,proj_warpindex_d,proj_warpvalue_d,raynumout,mynumpix,proj_buffdispl_d,proj_warpdispl_d,proj_mapdispl_d,proj_mapnz_d,proj_buffmap_d,proj_buffsize); #endif hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); //if(myid==0)printf("project %e milliseconds\n",milliseconds); pktime += milliseconds/1e3; //COMMUNICATION BUFFER hipLaunchKernelGGL(( kernel_VECPREC2COMMPREC), dim3((raynumout*FFACTOR+255)/256),dim3(256), 0, 0, socketreducesendbuff_d,partbuff_d,raynumout*FFACTOR,socketpackmap_d); hipDeviceSynchronize(); //SOCKET COMMUNICATION MPI_Barrier(MPI_COMM_SOCKET); double cstime = MPI_Wtime(); for(int psend = 0; psend < numproc_socket; psend++) if(socketsendcomm[psend]) hipMemcpyPeerAsync(socketrecvbuff_p[psend]+socketrecvbuffdispl_p[psend]*FFACTOR,socketrecvdevice_p[psend],socketreducesendbuff_d+socketsendcommdispl[psend]*FFACTOR,mydevice,sizeof(COMMPREC)*socketsendcomm[psend]*FFACTOR,socketstream[psend]); hipDeviceSynchronize(); MPI_Barrier(MPI_COMM_SOCKET); pcstime += MPI_Wtime()-cstime; //if(myid==0)printf("socket time %e\n",MPI_Wtime()-cstime); //SOCKET REDUCTION hipEventRecord(start); hipLaunchKernelGGL(( kernel_reduce), dim3((socketreduceoutdispl[numproc_data]+255)/256),dim3(256), 0, 0, nodereducesendbuff_d,socketreducerecvbuff_d,socketreducedispl_d,socketreduceindex_d,socketreduceoutdispl[numproc_data],socketrecvcommdispl[numproc_socket],nodepackmap_d,socketunpackmap_d); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); prtime += milliseconds/1e3; //NODE COMMUNICATION MPI_Barrier(MPI_COMM_NODE); double cntime = MPI_Wtime(); for(int psend = 0; psend < numproc_node; psend++) if(nodesendcomm[psend]) hipMemcpyPeerAsync(noderecvbuff_p[psend]+noderecvbuffdispl_p[psend]*FFACTOR,noderecvdevice_p[psend],nodereducesendbuff_d+nodesendcommdispl[psend]*FFACTOR,mydevice,sizeof(COMMPREC)*nodesendcomm[psend]*FFACTOR,nodestream[psend]); hipDeviceSynchronize(); MPI_Barrier(MPI_COMM_NODE); pcntime += MPI_Wtime()-cntime; //if(myid==0)printf("node time %e\n",MPI_Wtime()-cntime); //NODE REDUCTION hipEventRecord(start); hipLaunchKernelGGL(( kernel_reduce), dim3((nodereduceoutdispl[numproc_data]+255)/256),dim3(256), 0, 0, nodesendbuff_d,nodereducerecvbuff_d,nodereducedispl_d,nodereduceindex_d,nodereduceoutdispl[numproc_data],noderecvcommdispl[numproc_node],raypackmap_d,nodeunpackmap_d); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); prtime += milliseconds/1e3; }; void partial_backproject(){ //NODE SCATTER hipEventRecord(start); hipLaunchKernelGGL(( kernel_scatter), dim3((nodereduceoutdispl[numproc_data]+255)/256),dim3(256), 0, 0, 
nodesendbuff_d,nodereducerecvbuff_d,nodereducedispl_d,nodereduceindex_d,nodereduceoutdispl[numproc_data],noderecvcommdispl[numproc_node],raypackmap_d,nodeunpackmap_d); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); brtime += milliseconds/1e3; //NODE COMMUNICATION MPI_Barrier(MPI_COMM_NODE); double cntime = MPI_Wtime(); for(int psend = 0; psend < numproc_node; psend++) if(nodesendcomm[psend]) hipMemcpyPeerAsync(nodereducesendbuff_d+nodesendcommdispl[psend]*FFACTOR,mydevice,noderecvbuff_p[psend]+noderecvbuffdispl_p[psend]*FFACTOR,noderecvdevice_p[psend],sizeof(COMMPREC)*nodesendcomm[psend]*FFACTOR,nodestream[psend]); hipDeviceSynchronize(); MPI_Barrier(MPI_COMM_NODE); bcntime += MPI_Wtime()-cntime; //if(myid==0)printf("node time %e\n",MPI_Wtime()-cntime); //SOCKET SCATTER hipEventRecord(start); hipLaunchKernelGGL(( kernel_scatter), dim3((socketreduceoutdispl[numproc_data]+255)/256),dim3(256), 0, 0, nodereducesendbuff_d,socketreducerecvbuff_d,socketreducedispl_d,socketreduceindex_d,socketreduceoutdispl[numproc_data],socketrecvcommdispl[numproc_socket],nodepackmap_d,socketunpackmap_d); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); brtime += milliseconds/1e3; //SOCKET COMMUNICATION MPI_Barrier(MPI_COMM_SOCKET); double cstime = MPI_Wtime(); for(int psend = 0; psend < numproc_socket; psend++) if(socketsendcomm[psend]) hipMemcpyPeerAsync(socketreducesendbuff_d+socketsendcommdispl[psend]*FFACTOR,mydevice,socketrecvbuff_p[psend]+socketrecvbuffdispl_p[psend]*FFACTOR,socketrecvdevice_p[psend],sizeof(COMMPREC)*socketsendcomm[psend]*FFACTOR,socketstream[psend]); hipDeviceSynchronize(); MPI_Barrier(MPI_COMM_SOCKET); bcstime += MPI_Wtime()-cstime; //if(myid==0)printf("socket time %e\n",MPI_Wtime()-cstime); //BACKPROJECTION hipLaunchKernelGGL(( kernel_COMMPREC2VECPREC), dim3((raynumout*FFACTOR+255)/256),dim3(256), 0, 0, partbuff_d,socketreducesendbuff_d,raynumout*FFACTOR,socketpackmap_d); hipEventRecord(start); #ifdef MATRIX hipLaunchKernelGGL(( kernel_project), dim3(back_numblocks),dim3(back_blocksize),sizeof(VECPREC)*back_buffsize*FFACTOR, 0, tomobuff_d,partbuff_d,back_warpindval_d,mynumpix,raynumout,back_buffdispl_d,back_warpdispl_d,back_mapdispl_d,back_mapnz_d,back_buffmap_d,back_buffsize); #else hipLaunchKernelGGL(( kernel_project), dim3(back_numblocks),dim3(back_blocksize),sizeof(VECPREC)*back_buffsize*FFACTOR, 0, tomobuff_d,partbuff_d,back_warpindex_d,back_warpvalue_d,mynumpix,raynumout,back_buffdispl_d,back_warpdispl_d,back_mapdispl_d,back_mapnz_d,back_buffmap_d,back_buffsize); #endif hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds,start,stop); //if(myid==0)printf("backproject %e milliseconds\n",milliseconds); bktime += milliseconds/1e3; }; __global__ void kernel_reduce(COMMPREC *y, COMMPREC *x, int *displ, int *index, int numrow, int numcol, int *packmap, int *unpackmap){ int row = blockIdx.x*blockDim.x+threadIdx.x; #ifdef MIXED float reduce[FFACTOR] = {0.0}; #else VECPREC reduce[FFACTOR] = {0.0}; #endif if(row < numrow){ for(int n = displ[row]; n < displ[row+1]; n++){ int ind = index[n]; for(int f = 0; f < FFACTOR; f++) #ifdef MIXED reduce[f] += __half2float(x[unpackmap[f*numcol+ind]]); #else reduce[f] += x[unpackmap[f*numcol+ind]]; #endif } for(int f = 0; f < FFACTOR; f++) y[packmap[f*numrow+row]] = reduce[f]; } }; __global__ void kernel_reducenopack(double *y, COMMPREC *x, int *displ, int *index, int numrow, int numcol, int *unpackmap, double scale){ int row = 
blockIdx.x*blockDim.x+threadIdx.x; #ifdef MIXED float reduce[FFACTOR] = {0.0}; #else VECPREC reduce[FFACTOR] = {0.0}; #endif if(row < numrow){ for(int n = displ[row]; n < displ[row+1]; n++){ int ind = index[n]; for(int f = 0; f < FFACTOR; f++) #ifdef MIXED reduce[f] += __half2float(x[unpackmap[f*numcol+ind]]); #else reduce[f] += x[unpackmap[f*numcol+ind]]; #endif } for(int f = 0; f < FFACTOR; f++) y[f*numrow+row] = (double)reduce[f]*scale; } }; __global__ void kernel_scatternopack(double *y, COMMPREC *x, int *displ, int *index, int numrow, int numcol, int *unpackmap, double scale){ int row = blockIdx.x*blockDim.x+threadIdx.x; VECPREC scatter[FFACTOR] = {0.0}; if(row < numrow){ for(int f = 0; f < FFACTOR; f++) scatter[f] = y[f*numrow+row]*scale; for(int n = displ[row]; n < displ[row+1]; n++){ int ind = index[n]; for(int f = 0; f < FFACTOR; f++) x[unpackmap[f*numcol+ind]] = scatter[f]; } } }; __global__ void kernel_scatter(COMMPREC *y, COMMPREC *x, int *displ, int *index, int numrow, int numcol, int *packmap, int *unpackmap){ int row = blockIdx.x*blockDim.x+threadIdx.x; VECPREC scatter[FFACTOR] = {0.0}; if(row < numrow){ for(int f = 0; f < FFACTOR; f++) scatter[f] = y[packmap[f*numrow+row]]; for(int n = displ[row]; n < displ[row+1]; n++){ int ind = index[n]; for(int f = 0; f < FFACTOR; f++) x[unpackmap[f*numcol+ind]] = scatter[f]; } } }; __global__ void kernel_double2VECPREC(VECPREC *y, double *x,int dim, double scale){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) y[tid] = x[tid]*scale; }; __global__ void kernel_VECPREC2double(double *y, VECPREC *x,int dim, double scale){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) y[tid] = (double)x[tid]*scale; }; __global__ void kernel_VECPREC2COMMPREC(COMMPREC *y, VECPREC *x,int dim, int *packmap){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) y[packmap[tid]] = x[tid]; }; __global__ void kernel_COMMPREC2VECPREC(VECPREC *y, COMMPREC *x,int dim, int *unpackmap){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) y[tid] = x[unpackmap[tid]]; }; void copyD2D_kernel(double *a, double *b, int dim){ hipMemcpy(a,b,sizeof(double)*dim,hipMemcpyDeviceToDevice); }; void copyD2H_kernel(double *a, double *b, int dim){ hipMemcpy(a,b,sizeof(double)*dim,hipMemcpyDeviceToHost); }; void copyH2D_kernel(double *a, double *b, int dim){ hipMemcpy(a,b,sizeof(double)*dim,hipMemcpyHostToDevice); }; void init_kernel(double *a, int dim){ hipMemset(a,0,sizeof(double)*dim); }; __global__ void kernel_saxpy(double *a, double *b, double coef, double *c, int dim){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) a[tid] = b[tid] + coef*c[tid]; }; void saxpy_kernel(double *a, double *b, double coef, double *c, int dim){ hipLaunchKernelGGL(( kernel_saxpy), dim3((dim+255)/256),dim3(256), 0, 0, a,b,coef,c,dim); }; __global__ void kernel_scale(double *a, double coef, int dim){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) a[tid] = coef*a[tid]; }; void scale_kernel(double *a, double coef, int dim){ hipLaunchKernelGGL(( kernel_scale), dim3((dim+255)/256),dim3(256), 0, 0, a,coef,dim); }; __global__ void kernel_dot(double *a, double *b, int dim, double *buffer){ extern __shared__ double temp[]; int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) temp[threadIdx.x] = a[tid]*b[tid]; else temp[threadIdx.x] = 0; for(int stride = blockDim.x/2; stride > 0; stride>>=1){ __syncthreads(); if(threadIdx.x < stride) temp[threadIdx.x] += temp[threadIdx.x+stride]; } if(threadIdx.x==0) buffer[blockIdx.x] = temp[0]; }; double 
dot_kernel(double *a, double *b, int dim){ int numblocks = (dim+255)/256; hipLaunchKernelGGL(( kernel_dot), dim3(numblocks),dim3(256),sizeof(double)*256, 0, a,b,dim,reducebuff_d); hipMemcpy(reducebuff_h,reducebuff_d,sizeof(double)*numblocks,hipMemcpyDeviceToHost); double reduce = 0.0; for(int n = 0; n < numblocks; n++) reduce += reducebuff_h[n]; MPI_Allreduce(MPI_IN_PLACE,&reduce,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_DATA); return reduce; }; __global__ void kernel_max(double *a, int dim, double *buffer){ extern __shared__ double temp[]; int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) temp[threadIdx.x] = a[tid]; else temp[threadIdx.x] = 0.0; for(int stride = blockDim.x/2; stride > 0; stride>>=1){ __syncthreads(); if(threadIdx.x < stride) if(temp[threadIdx.x+stride] > temp[threadIdx.x]) temp[threadIdx.x] = temp[threadIdx.x+stride]; } if(threadIdx.x==0) buffer[blockIdx.x] = temp[0]; }; double max_kernel(double *a, int dim){ int numblocks = (dim+255)/256; hipLaunchKernelGGL(( kernel_max), dim3(numblocks),dim3(256),sizeof(double)*256, 0, a,dim,reducebuff_d); hipMemcpy(reducebuff_h,reducebuff_d,sizeof(double)*numblocks,hipMemcpyDeviceToHost); double reduce = 0.0; for(int n = 0; n < numblocks; n++) if(reducebuff_h[n] > reduce) reduce = reducebuff_h[n]; MPI_Allreduce(MPI_IN_PLACE,&reduce,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_DATA); return reduce; };
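Every launch in the file above is timed with the same event idiom: record start, run the work, record stop, synchronize on stop, then accumulate the elapsed milliseconds into one of the counters (pktime, pmtime, prtime and so on). A condensed sketch of that idiom follows, using the file's own partial_project() as the timed work; timed_section_ms is an illustrative name and the event arguments stand in for the global start/stop pair created in setup_gpu().

// Condensed form of the timing idiom wrapped around the launches above.
static float timed_section_ms(hipEvent_t start, hipEvent_t stop)
{
    hipEventRecord(start);                     // begin timing on the default stream
    partial_project();                         // the work being measured (any launch sequence)
    hipEventRecord(stop);
    hipEventSynchronize(stop);                 // host blocks until 'stop' has been reached
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);     // GPU time elapsed between the two events
    return ms;                                 // callers accumulate ms/1e3 into pktime etc.
}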
f84932b831d81f8d8637a9f7b1392ef309c0fe02.cu
#include "vars.h" //TIMERS & COUNTERS extern int numproj; extern double ptime; extern double pktime; extern double pcstime; extern double pcntime; extern double pcrtime; extern double pchtime; extern double pmtime; extern double prtime; extern int numback; extern double btime; extern double bktime; extern double bcstime; extern double bcntime; extern double bcrtime; extern double bchtime; extern double bmtime; extern double brtime; extern int raynuminc; extern int raynumout; extern int mynumray; extern int mynumpix; extern int batchsize; extern int *raysendstart; extern int *rayrecvstart; extern int *raysendcount; extern int *rayrecvcount; extern int *rayraystart; extern int *rayrayind; extern int *rayrecvlist; extern int proj_blocksize; extern int proj_numblocks; extern int proj_numbufftot; extern int *proj_buffdispl; extern int proj_buffsize; extern int proj_mapnztot; extern int *proj_mapdispl; extern int *proj_mapnz; extern int *proj_buffmap; extern int proj_warpnztot; extern int *proj_warpdispl; extern int back_blocksize; extern int back_numblocks; extern int back_numbufftot; extern int *back_buffdispl; extern int back_buffsize; extern int back_mapnztot; extern int *back_mapdispl; extern int *back_mapnz; extern int *back_buffmap; extern int back_warpnztot; extern int *back_warpdispl; #ifdef MATRIX extern matrix *proj_warpindval; extern matrix *back_warpindval; #else extern unsigned short *proj_warpindex; extern MATPREC *proj_warpvalue; extern unsigned short *back_warpindex; extern MATPREC *back_warpvalue; #endif int *proj_buffdispl_d; int *proj_mapdispl_d; int *proj_mapnz_d; int *proj_buffmap_d; int *proj_warpdispl_d; int *back_buffdispl_d; int *back_mapdispl_d; int *back_mapnz_d; int *back_buffmap_d; int *back_warpdispl_d; #ifdef MATRIX matrix *proj_warpindval_d; matrix *back_warpindval_d; #else unsigned short *proj_warpindex_d; MATPREC *proj_warpvalue_d; unsigned short *back_warpindex_d; MATPREC *back_warpvalue_d; #endif int *rayraystart_d; int *rayrayind_d; int *rayindray_d; extern int socketrayout; extern int socketrayinc; extern int *socketreduceout; extern int *socketreduceinc; extern int *socketreduceoutdispl; extern int *socketreduceincdispl; extern int *socketsendcomm; extern int *socketrecvcomm; extern int *socketsendcommdispl; extern int *socketrecvcommdispl; extern int *socketsendmap; extern int *socketreducedispl; extern int *socketreduceindex; extern int *socketraydispl; extern int *socketrayindex; extern int *socketpackmap; extern int *socketunpackmap; extern int noderayout; extern int noderayinc; extern int *nodereduceout; extern int *nodereduceinc; extern int *nodereduceoutdispl; extern int *nodereduceincdispl; extern int *nodesendcomm; extern int *noderecvcomm; extern int *nodesendcommdispl; extern int *noderecvcommdispl; extern int *nodesendmap; extern int *nodereducedispl; extern int *nodereduceindex; extern int *noderaydispl; extern int *noderayindex; extern int *nodepackmap; extern int *nodeunpackmap; extern int *raypackmap; extern int *rayunpackmap; extern int numthreads; extern int numproc; extern int myid; extern MPI_Comm MPI_COMM_BATCH; extern int numproc_batch; extern int myid_batch; extern MPI_Comm MPI_COMM_DATA; extern int numproc_data; extern int myid_data; extern MPI_Comm MPI_COMM_NODE; extern int numproc_node; extern int myid_node; extern int numnode; extern MPI_Comm MPI_COMM_SOCKET; extern int numproc_socket; extern int myid_socket; extern int numsocket; int *socketpackmap_d; int *socketunpackmap_d; int *socketreducedispl_d; int *socketreduceindex_d; 
int *nodepackmap_d; int *nodeunpackmap_d; int *nodereducedispl_d; int *nodereduceindex_d; int *raypackmap_d; int *rayunpackmap_d; int *noderaydispl_d; int *noderayindex_d; VECPREC *tomobuff_d; VECPREC *partbuff_d; COMMPREC *socketreducesendbuff_d; COMMPREC *socketreducerecvbuff_d; COMMPREC *nodereducesendbuff_d; COMMPREC *nodereducerecvbuff_d; COMMPREC *nodesendbuff_d; COMMPREC *noderecvbuff_d; COMMPREC *nodesendbuff_h; COMMPREC *noderecvbuff_h; extern int *socketrecvbuffdispl_p; extern COMMPREC **socketrecvbuff_p; extern int *socketrecvdevice_p; extern int *noderecvbuffdispl_p; extern COMMPREC **noderecvbuff_p; extern int *noderecvdevice_p; #ifdef MATRIX __global__ void kernel_project __launch_bounds__(1024,1) (VECPREC *y, VECPREC *x, matrix *indval, int numrow, int numcol, int *buffdispl, int *displ, int *mapdispl, int *mapnz, int *buffmap, int buffsize){ #else __global__ void kernel_project __launch_bounds__(1024,1) (VECPREC *y, VECPREC *x, unsigned short *index, MATPREC *value, int numrow, int numcol, int *buffdispl, int *displ, int *mapdispl, int *mapnz, int *buffmap, int buffsize){ #endif extern __shared__ VECPREC shared[]; #ifdef MIXED float acc[FFACTOR] = {0.0}; #else VECPREC acc[FFACTOR] = {0.0}; #endif int wind = threadIdx.x%WARPSIZE; for(int buff = buffdispl[blockIdx.x]; buff < buffdispl[blockIdx.x+1]; buff++){ int mapoffset = mapdispl[buff]; for(int i = threadIdx.x; i < mapnz[buff]; i += blockDim.x){ int ind = buffmap[mapoffset+i]; #pragma unroll for(int f = 0; f < FFACTOR; f++) shared[f*buffsize+i] = x[f*numcol+ind]; } __syncthreads(); int warp = (buff*blockDim.x+threadIdx.x)/WARPSIZE; for(int n = displ[warp]; n < displ[warp+1]; n++){ #ifdef MATRIX matrix mat = indval[n*(long)WARPSIZE+wind]; #ifdef MIXED float val = mat.val; #pragma unroll for(int f = 0; f < FFACTOR; f++) acc[f] += __half2float(shared[f*buffsize+mat.ind])*val; #else for(int f = 0; f < FFACTOR; f++) acc[f] += shared[f*buffsize+mat.ind]*mat.val; #endif #else unsigned short ind = index[n*(long)WARPSIZE+wind]; MATPREC val = value[n*(long)WARPSIZE+wind]; #pragma unroll for(int f = 0; f < FFACTOR; f++) acc[f] += shared[f*buffsize+ind]*val; #endif } __syncthreads(); } int row = blockIdx.x*blockDim.x+threadIdx.x; if(row < numrow) for(int f = 0; f < FFACTOR; f++) y[f*numrow+row] = acc[f]; }; __global__ void kernel_reduce(COMMPREC*,COMMPREC*,int*,int*,int,int,int*,int*); __global__ void kernel_reducenopack(double*,COMMPREC*,int*,int*,int,int,int*,double); __global__ void kernel_scatternopack(double*,COMMPREC*,int*,int*,int,int,int*,double); __global__ void kernel_scatter(COMMPREC*,COMMPREC*,int*,int*,int,int,int*,int*); __global__ void kernel_double2VECPREC(VECPREC*,double*,int,double); __global__ void kernel_VECPREC2double(double*,VECPREC*,int,double); __global__ void kernel_VECPREC2COMMPREC(COMMPREC*,VECPREC*,int,int*); __global__ void kernel_COMMPREC2VECPREC(VECPREC*,COMMPREC*,int,int*); void partial_project(); void partial_backproject(); double *reducebuff_d; double *reducebuff_h; int numdevice; int mydevice; cudaEvent_t start,stop; float milliseconds; MPI_Request *sendrequest; MPI_Request *recvrequest; cudaStream_t *socketstream; cudaStream_t *nodestream; void setup_gpu(double **obj_d, double **gra_d, double **dir_d, double **res_d, double **ray_d, double **obj_h, double **res_h){ cudaGetDeviceCount(&numdevice); mydevice = myid%numdevice; cudaSetDevice(mydevice); if(myid==0){ int deviceCount; cudaGetDeviceCount(&deviceCount); printf("\n"); printf("Device Count: %d\n",deviceCount); cudaDeviceProp deviceProp; 
cudaGetDeviceProperties(&deviceProp,0); printf("Device %d name: %s\n",0,deviceProp.name); printf("Clock Frequency: %f GHz\n",deviceProp.clockRate/1.e9); printf("Computational Capabilities: %d, %d\n",deviceProp.major,deviceProp.minor); printf("Maximum global memory size: %lu\n",deviceProp.totalGlobalMem); printf("Maximum constant memory size: %lu\n",deviceProp.totalConstMem); printf("Maximum shared memory size per block: %lu\n",deviceProp.sharedMemPerBlock); printf("Maximum block dimensions: %dx%dx%d\n",deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]); printf("Maximum grid dimensions: %dx%dx%d\n",deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]); printf("Maximum threads per block: %d\n",deviceProp.maxThreadsPerBlock); printf("Warp size: %d\n",deviceProp.warpSize); printf("32-bit Reg. per block: %d\n",deviceProp.regsPerBlock); printf("\n"); } //CONJUGATE-GRADIENT BUFFERS double batchmem = 0.0; batchmem += sizeof(double)*mynumpix*batchsize/1.0e9; if(mynumpix > mynumray) batchmem += sizeof(double)*mynumpix*batchsize/1.0e9; else batchmem += sizeof(double)*mynumray*batchsize/1.0e9; batchmem += sizeof(double)*mynumpix*batchsize/1.0e9; batchmem += sizeof(double)*mynumray*batchsize/1.0e9; cudaMalloc((void**)obj_d,sizeof(double)*mynumpix*batchsize); if(mynumpix > mynumray) cudaMalloc((void**)gra_d,sizeof(double)*mynumpix*batchsize); else cudaMalloc((void**)gra_d,sizeof(double)*mynumray*batchsize); cudaMalloc((void**)dir_d,sizeof(double)*mynumpix*batchsize); cudaMalloc((void**)res_d,sizeof(double)*mynumray*batchsize); *ray_d = *gra_d; cudaMallocHost((void**)obj_h,sizeof(double)*mynumpix*batchsize); cudaMallocHost((void**)res_h,sizeof(double)*mynumray*batchsize); //REDUCTION BUFFERS int reducebuffsize = 0; if(mynumpix > mynumray) reducebuffsize = (mynumpix*batchsize+255)/256; else reducebuffsize = (mynumray*batchsize+255)/256; if(myid==0)printf("reducebuffsize: %d\n",reducebuffsize); cudaMalloc((void**)&reducebuff_d,sizeof(double)*reducebuffsize); cudaMallocHost((void**)&reducebuff_h,sizeof(double)*reducebuffsize); double projmem = 0.0; projmem = projmem + sizeof(int)/1.0e9*(proj_numblocks+1); projmem = projmem + sizeof(int)/1.0e9*(proj_numbufftot+1); projmem = projmem + sizeof(int)/1.0e9*proj_numbufftot; projmem = projmem + sizeof(int)/1.0e9*proj_mapnztot; projmem = projmem + sizeof(int)/1.0e9*(proj_numbufftot*(proj_blocksize/WARPSIZE)+1); projmem = projmem + sizeof(unsigned short)/1.0e9*(proj_warpnztot*(long)WARPSIZE); projmem = projmem + sizeof(MATPREC)/1.0e9*(proj_warpnztot*(long)WARPSIZE); projmem = projmem + sizeof(int)/1.0e9*proj_mapnztot; //printf("PROC %d FORWARD PROJECTION MEMORY: %f GB\n",myid,projmem); double backmem = 0.0; backmem = backmem + sizeof(int)/1.0e9*(back_numblocks+1); backmem = backmem + sizeof(int)/1.0e9*(back_numbufftot+1); backmem = backmem + sizeof(int)/1.0e9*back_numbufftot; backmem = backmem + sizeof(int)/1.0e9*back_mapnztot; backmem = backmem + sizeof(int)/1.0e9*(back_numbufftot*(back_blocksize/WARPSIZE)+1); backmem = backmem + sizeof(unsigned short)/1.0e9*(back_warpnztot*(long)WARPSIZE); backmem = backmem + sizeof(MATPREC)/1.0e9*(back_warpnztot*(long)WARPSIZE); backmem = backmem + sizeof(int)/1.0e9*back_mapnztot; //printf("PROC %d BACKPROJECTION MEMORY: %f GB\n",myid,backmem); cudaMalloc((void**)&proj_buffdispl_d,sizeof(int)*(proj_numblocks+1)); cudaMalloc((void**)&proj_mapdispl_d,sizeof(int)*(proj_numbufftot+1)); cudaMalloc((void**)&proj_mapnz_d,sizeof(int)*proj_numbufftot); 
cudaMalloc((void**)&proj_buffmap_d,sizeof(int)*proj_mapnztot); cudaMalloc((void**)&proj_warpdispl_d,sizeof(int)*(proj_numbufftot*(proj_blocksize/WARPSIZE)+1)); cudaMemcpy(proj_buffdispl_d,proj_buffdispl,sizeof(int)*(proj_numblocks+1),cudaMemcpyHostToDevice); cudaMemcpy(proj_mapdispl_d,proj_mapdispl,sizeof(int)*(proj_numbufftot+1),cudaMemcpyHostToDevice); cudaMemcpy(proj_mapnz_d,proj_mapnz,sizeof(int)*proj_numbufftot,cudaMemcpyHostToDevice); cudaMemcpy(proj_buffmap_d,proj_buffmap,sizeof(int)*proj_mapnztot,cudaMemcpyHostToDevice); cudaMemcpy(proj_warpdispl_d,proj_warpdispl,sizeof(int)*(proj_numbufftot*(proj_blocksize/WARPSIZE)+1),cudaMemcpyHostToDevice); delete[] proj_buffdispl; delete[] proj_mapdispl; delete[] proj_mapnz; delete[] proj_buffmap; delete[] proj_warpdispl; cudaMalloc((void**)&back_buffdispl_d,sizeof(int)*(back_numblocks+1)); cudaMalloc((void**)&back_mapdispl_d,sizeof(int)*(back_numbufftot+1)); cudaMalloc((void**)&back_mapnz_d,sizeof(int)*back_numbufftot); cudaMalloc((void**)&back_buffmap_d,sizeof(int)*back_mapnztot); cudaMalloc((void**)&back_warpdispl_d,sizeof(int)*(back_numbufftot*(back_blocksize/WARPSIZE)+1)); cudaMemcpy(back_buffdispl_d,back_buffdispl,sizeof(int)*(back_numblocks+1),cudaMemcpyHostToDevice); cudaMemcpy(back_mapdispl_d,back_mapdispl,sizeof(int)*(back_numbufftot+1),cudaMemcpyHostToDevice); cudaMemcpy(back_mapnz_d,back_mapnz,sizeof(int)*back_numbufftot,cudaMemcpyHostToDevice); cudaMemcpy(back_buffmap_d,back_buffmap,sizeof(int)*back_mapnztot,cudaMemcpyHostToDevice); cudaMemcpy(back_warpdispl_d,back_warpdispl,sizeof(int)*(back_numbufftot*(back_blocksize/WARPSIZE)+1),cudaMemcpyHostToDevice); delete[] back_buffdispl; delete[] back_mapdispl; delete[] back_mapnz; delete[] back_buffmap; delete[] back_warpdispl; #ifdef MATRIX cudaMalloc((void**)&proj_warpindval_d,sizeof(matrix)*proj_warpnztot*(long)WARPSIZE); cudaMalloc((void**)&back_warpindval_d,sizeof(matrix)*back_warpnztot*(long)WARPSIZE); cudaMemcpy(proj_warpindval_d,proj_warpindval,sizeof(matrix)*proj_warpnztot*(long)WARPSIZE,cudaMemcpyHostToDevice); cudaMemcpy(back_warpindval_d,back_warpindval,sizeof(matrix)*back_warpnztot*(long)WARPSIZE,cudaMemcpyHostToDevice); delete[] proj_warpindval; delete[] back_warpindval; #else cudaMalloc((void**)&proj_warpindex_d,sizeof(unsigned short)*proj_warpnztot*(long)WARPSIZE); cudaMalloc((void**)&proj_warpvalue_d,sizeof(MATPREC)*proj_warpnztot*(long)WARPSIZE); cudaMalloc((void**)&back_warpindex_d,sizeof(unsigned short)*back_warpnztot*(long)WARPSIZE); cudaMalloc((void**)&back_warpvalue_d,sizeof(MATPREC)*back_warpnztot*(long)WARPSIZE); cudaMemcpy(proj_warpindex_d,proj_warpindex,sizeof(unsigned short)*proj_warpnztot*(long)WARPSIZE,cudaMemcpyHostToDevice); cudaMemcpy(proj_warpvalue_d,proj_warpvalue,sizeof(MATPREC)*proj_warpnztot*(long)WARPSIZE,cudaMemcpyHostToDevice); cudaMemcpy(back_warpindex_d,back_warpindex,sizeof(unsigned short)*back_warpnztot*(long)WARPSIZE,cudaMemcpyHostToDevice); cudaMemcpy(back_warpvalue_d,back_warpvalue,sizeof(MATPREC)*back_warpnztot*(long)WARPSIZE,cudaMemcpyHostToDevice); delete[] proj_warpindex; delete[] proj_warpvalue; delete[] back_warpindex; delete[] back_warpvalue; #endif //COMMUNICATION BUFFERS double commem = 0.0; commem += sizeof(VECPREC)*mynumpix*FFACTOR/1.0e9; commem += sizeof(VECPREC)*raynumout*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*socketsendcommdispl[numproc_socket]*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*socketrecvcommdispl[numproc_socket]*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*nodesendcommdispl[numproc_node]*FFACTOR/1.0e9; commem += 
sizeof(COMMPREC)*noderecvcommdispl[numproc_node]*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR/1.0e9; commem += sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR/1.0e9; cudaMalloc((void**)&tomobuff_d,sizeof(VECPREC)*mynumpix*FFACTOR); cudaMalloc((void**)&partbuff_d,sizeof(VECPREC)*raynumout*FFACTOR); cudaMalloc((void**)&socketreducesendbuff_d,sizeof(COMMPREC)*socketsendcommdispl[numproc_socket]*FFACTOR); cudaMalloc((void**)&socketreducerecvbuff_d,sizeof(COMMPREC)*socketrecvcommdispl[numproc_socket]*FFACTOR); cudaMalloc((void**)&nodereducesendbuff_d,sizeof(COMMPREC)*nodesendcommdispl[numproc_node]*FFACTOR); cudaMalloc((void**)&nodereducerecvbuff_d,sizeof(COMMPREC)*noderecvcommdispl[numproc_node]*FFACTOR); cudaMalloc((void**)&nodesendbuff_d,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR); cudaMalloc((void**)&noderecvbuff_d,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR); //HOST BUFFER cudaMallocHost((void**)&nodesendbuff_h,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR); cudaMallocHost((void**)&noderecvbuff_h,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR); //PACK AND UNPACK MAPS commem += sizeof(int)*socketsendcommdispl[numproc_socket]*FFACTOR/1.0e9; commem += sizeof(int)*socketrecvcommdispl[numproc_socket]*FFACTOR/1.0e9; commem += sizeof(int)*(socketreduceoutdispl[numproc_data]+1)/1.0e9; commem += sizeof(int)*socketreducedispl[socketreduceoutdispl[numproc_data]]/1.0e9; commem += sizeof(int)*socketreduceoutdispl[numproc_data]*FFACTOR/1.0e9; commem += sizeof(int)*noderecvcommdispl[numproc_node]*FFACTOR/1.0e9; commem += sizeof(int)*(nodereduceoutdispl[numproc_data]+1)/1.0e9; commem += sizeof(int)*nodereducedispl[nodereduceoutdispl[numproc_data]]/1.0e9; commem += sizeof(int)*nodereduceoutdispl[numproc_data]*FFACTOR/1.0e9; commem += sizeof(int)*nodereduceincdispl[numproc_data]*FFACTOR/1.0e9; commem += sizeof(int)*(mynumray+1)/1.0e9; commem += sizeof(int)*noderaydispl[mynumray]/1.0e9; cudaMalloc((void**)&socketpackmap_d,sizeof(int)*socketsendcommdispl[numproc_socket]*FFACTOR); cudaMalloc((void**)&socketunpackmap_d,sizeof(int)*socketrecvcommdispl[numproc_socket]*FFACTOR); cudaMalloc((void**)&socketreducedispl_d,sizeof(int)*(socketreduceoutdispl[numproc_data]+1)); cudaMalloc((void**)&socketreduceindex_d,sizeof(int)*socketreducedispl[socketreduceoutdispl[numproc_data]]); cudaMalloc((void**)&nodepackmap_d,sizeof(int)*socketreduceoutdispl[numproc_data]*FFACTOR); cudaMalloc((void**)&nodeunpackmap_d,sizeof(int)*noderecvcommdispl[numproc_node]*FFACTOR); cudaMalloc((void**)&nodereducedispl_d,sizeof(int)*(nodereduceoutdispl[numproc_data]+1)); cudaMalloc((void**)&nodereduceindex_d,sizeof(int)*nodereducedispl[nodereduceoutdispl[numproc_data]]); cudaMalloc((void**)&raypackmap_d,sizeof(int)*nodereduceoutdispl[numproc_data]*FFACTOR); cudaMalloc((void**)&rayunpackmap_d,sizeof(int)*nodereduceincdispl[numproc_data]*FFACTOR); cudaMalloc((void**)&noderaydispl_d,sizeof(int)*(mynumray+1)); cudaMalloc((void**)&noderayindex_d,sizeof(int)*noderaydispl[mynumray]); cudaMemcpy(socketpackmap_d,socketpackmap,sizeof(int)*socketsendcommdispl[numproc_socket]*FFACTOR,cudaMemcpyHostToDevice); cudaMemcpy(socketunpackmap_d,socketunpackmap,sizeof(int)*socketrecvcommdispl[numproc_socket]*FFACTOR,cudaMemcpyHostToDevice); cudaMemcpy(socketreducedispl_d,socketreducedispl,sizeof(int)*(socketreduceoutdispl[numproc_data]+1),cudaMemcpyHostToDevice); 
cudaMemcpy(socketreduceindex_d,socketreduceindex,sizeof(int)*socketreducedispl[socketreduceoutdispl[numproc_data]],cudaMemcpyHostToDevice); cudaMemcpy(nodepackmap_d,nodepackmap,sizeof(int)*socketreduceoutdispl[numproc_data]*FFACTOR,cudaMemcpyHostToDevice); cudaMemcpy(nodeunpackmap_d,nodeunpackmap,sizeof(int)*noderecvcommdispl[numproc_node]*FFACTOR,cudaMemcpyHostToDevice); cudaMemcpy(nodereducedispl_d,nodereducedispl,sizeof(int)*(nodereduceoutdispl[numproc_data]+1),cudaMemcpyHostToDevice); cudaMemcpy(nodereduceindex_d,nodereduceindex,sizeof(int)*nodereducedispl[nodereduceoutdispl[numproc_data]],cudaMemcpyHostToDevice); cudaMemcpy(raypackmap_d,raypackmap,sizeof(int)*nodereduceoutdispl[numproc_data]*FFACTOR,cudaMemcpyHostToDevice); cudaMemcpy(rayunpackmap_d,rayunpackmap,sizeof(int)*nodereduceincdispl[numproc_data]*FFACTOR,cudaMemcpyHostToDevice); cudaMemcpy(noderaydispl_d,noderaydispl,sizeof(int)*(mynumray+1),cudaMemcpyHostToDevice); cudaMemcpy(noderayindex_d,noderayindex,sizeof(int)*noderaydispl[mynumray],cudaMemcpyHostToDevice); double gpumem = projmem+backmem; double gpumems[numproc_data]; double batchmems[numproc_data]; double commems[numproc_data]; MPI_Allgather(&gpumem,1,MPI_DOUBLE,gpumems,1,MPI_DOUBLE,MPI_COMM_DATA); MPI_Allgather(&batchmem,1,MPI_DOUBLE,batchmems,1,MPI_DOUBLE,MPI_COMM_DATA); MPI_Allgather(&commem,1,MPI_DOUBLE,commems,1,MPI_DOUBLE,MPI_COMM_DATA); if(myid==0){ double gpumaxmem = 0.0; double batchmaxmem = 0.0; double commaxmem = 0.0; double totmaxmem = 0.0; double gputotmem = 0.0; double batchtotmem = 0.0; double commtotmem = 0.0; for(int p = 0; p < numproc_data; p++){ printf("PROC %d GPU MEMORY: %f GB + %f GB + %f GB = %f GB\n",p,gpumems[p],batchmems[p],commems[p],gpumems[p]+batchmems[p]+commems[p]); if(gpumems[p]>gpumaxmem)gpumaxmem=gpumems[p]; if(batchmems[p]>batchmaxmem)batchmaxmem=batchmems[p]; if(commems[p]>commaxmem)commaxmem=commems[p]; if(gpumems[p]+batchmems[p]+commems[p]>totmaxmem)totmaxmem=gpumems[p]+batchmems[p]+commems[p]; gputotmem += gpumems[p]; batchtotmem += batchmems[p]; commtotmem += commems[p]; } printf("MAX GPU MEMORY gpumem %f GB batchmem %f GB commem %f GB total %f GB\n",gpumaxmem,batchmaxmem,commaxmem,totmaxmem); printf("TOTAL GPU MEMORY gpumem %f GB + batchmem %f GB + commem %f GB = %f GB\n",gputotmem,batchtotmem,commtotmem,gputotmem+batchtotmem+commtotmem); } cudaFuncSetAttribute(kernel_project,cudaFuncAttributeMaxDynamicSharedMemorySize,(164-1)*1024); cudaFuncSetAttribute(kernel_project,cudaFuncAttributePreferredSharedMemoryCarveout,cudaSharedmemCarveoutMaxShared); cudaFuncAttributes funcAttributes; cudaFuncGetAttributes(&funcAttributes,kernel_project); if(myid==0){ printf("\n"); printf("SpMM Attributes\n"); printf("Binary Version: %d\n",funcAttributes.binaryVersion); printf("Cache Mode: %d\n",funcAttributes.cacheModeCA); printf("Constant Memory: %lu\n",funcAttributes.constSizeBytes); printf("Local Memory: %lu\n",funcAttributes.localSizeBytes); printf("Max Dynamic Shared Memory: %d\n",funcAttributes.maxDynamicSharedSizeBytes); printf("Max Threads per Block: %d\n",funcAttributes.maxThreadsPerBlock); printf("Number of Registers: %d\n",funcAttributes.numRegs); printf("Shared Memory Carveout: %d\n",funcAttributes.preferredShmemCarveout); printf("PTX Version %d\n",funcAttributes.ptxVersion); printf("Static Shared Memory: %lu\n",funcAttributes.sharedSizeBytes); printf("\n"); } cudaEventCreate(&start); cudaEventCreate(&stop); sendrequest = new MPI_Request[numproc_data]; recvrequest = new MPI_Request[numproc_data]; socketstream = new 
cudaStream_t[numproc_socket]; nodestream = new cudaStream_t[numproc_node]; for(int p = 0; p < numproc_socket; p++) cudaStreamCreate(&socketstream[p]); for(int p = 0; p < numproc_node; p++) cudaStreamCreate(&nodestream[p]); communications(); return; } void project(double *sino_d, double *tomo_d, double scale, int batchslice){ cudaDeviceSynchronize(); MPI_Barrier(MPI_COMM_DATA); double projecttime = MPI_Wtime(); //PARTIAL PROJECTION kernel_double2VECPREC<<<(mynumpix*FFACTOR+255)/256,256>>>(tomobuff_d,tomo_d,mynumpix*FFACTOR,scale); partial_project(); for(int slice = 0; slice < batchslice; slice += FFACTOR){ //MEMCPY DEVICE TO HOST cudaEventRecord(start); cudaMemcpy(nodesendbuff_h,nodesendbuff_d,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR,cudaMemcpyDeviceToHost); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); pmtime += milliseconds/1e3; //HOST COMMUNICATION MPI_Barrier(MPI_COMM_DATA); double chtime = MPI_Wtime(); { int sendcount = 0; int recvcount = 0; for(int p = 0; p < numproc_data; p++) if(nodereduceout[p]){ MPI_Issend(nodesendbuff_h+nodereduceoutdispl[p]*FFACTOR,nodereduceout[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,sendrequest+sendcount); sendcount++; } for(int p = 0; p < numproc_data; p++) if(nodereduceinc[p]){ MPI_Irecv(noderecvbuff_h+nodereduceincdispl[p]*FFACTOR,nodereduceinc[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,recvrequest+recvcount); recvcount++; } #ifdef OVERLAP //PARTIAL PROJECTION if(slice+FFACTOR < batchslice){ kernel_double2VECPREC<<<(mynumpix*FFACTOR+255)/256,256>>>(tomobuff_d,tomo_d+(slice+FFACTOR)*mynumpix,mynumpix*FFACTOR,scale); partial_project(); } #endif MPI_Waitall(sendcount,sendrequest,MPI_STATUSES_IGNORE); MPI_Waitall(recvcount,recvrequest,MPI_STATUSES_IGNORE); } MPI_Barrier(MPI_COMM_DATA); pchtime += MPI_Wtime()-chtime; //if(myid==0)printf("rack time %e\n",MPI_Wtime()-chtime); //MEMCPY HOST TO DEVICE cudaEventRecord(start); cudaMemcpy(noderecvbuff_d,noderecvbuff_h,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR,cudaMemcpyHostToDevice); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); pmtime += milliseconds/1e3; //HOST REDUCTION cudaEventRecord(start); kernel_reducenopack<<<(mynumray+255)/256,256>>>(sino_d+slice*mynumray,noderecvbuff_d,noderaydispl_d,noderayindex_d,mynumray,nodereduceincdispl[numproc_data],rayunpackmap_d,1.0/scale); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); prtime += milliseconds/1e3; //#endif numproj++; #ifndef OVERLAP //PARTIAL PROJECTION if(slice+FFACTOR < batchslice){ kernel_double2VECPREC<<<(mynumpix*FFACTOR+255)/256,256>>>(tomobuff_d,tomo_d+(slice+FFACTOR)*mynumpix,mynumpix*FFACTOR,scale); partial_project(); } #endif } cudaDeviceSynchronize(); MPI_Barrier(MPI_COMM_DATA); ptime += MPI_Wtime()-projecttime; } void backproject(double *tomo_d, double *sino_d, double scale, int batchslice){ cudaDeviceSynchronize(); MPI_Barrier(MPI_COMM_DATA); double backprojecttime = MPI_Wtime(); //HOST SCATTER cudaEventRecord(start); kernel_scatternopack<<<(mynumray+255)/256,256>>>(sino_d,noderecvbuff_d,noderaydispl_d,noderayindex_d,mynumray,nodereduceincdispl[numproc_data],rayunpackmap_d,scale); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); brtime += milliseconds/1e3; //MEMCPY DEVICE TO HOST cudaEventRecord(start); 
cudaMemcpy(noderecvbuff_h,noderecvbuff_d,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR,cudaMemcpyDeviceToHost); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); bmtime += milliseconds/1e3; //HOST COMMUNICATION MPI_Barrier(MPI_COMM_DATA); double chtime = MPI_Wtime(); { int sendcount = 0; int recvcount = 0; for(int p = 0; p < numproc_data; p++) if(nodereduceout[p]){ MPI_Irecv(nodesendbuff_h+nodereduceoutdispl[p]*FFACTOR,nodereduceout[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,sendrequest+sendcount); sendcount++; } for(int p = 0; p < numproc_data; p++) if(nodereduceinc[p]){ MPI_Issend(noderecvbuff_h+nodereduceincdispl[p]*FFACTOR,nodereduceinc[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,recvrequest+recvcount); recvcount++; } MPI_Waitall(sendcount,sendrequest,MPI_STATUSES_IGNORE); MPI_Waitall(recvcount,recvrequest,MPI_STATUSES_IGNORE); } MPI_Barrier(MPI_COMM_DATA); bchtime += MPI_Wtime()-chtime; //if(myid==0)printf("rack time %e\n",MPI_Wtime()-chtime); //MEMCPY HOST TO DEVICE cudaEventRecord(start); cudaMemcpy(nodesendbuff_d,nodesendbuff_h,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR,cudaMemcpyHostToDevice); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); bmtime += milliseconds/1e3; for(int slice = 0; slice < batchslice; slice += FFACTOR){ double chtime; int sendcount = 0; int recvcount = 0; if(slice+FFACTOR < batchslice){ //HOST SCATTER cudaEventRecord(start); kernel_scatternopack<<<(mynumray+255)/256,256>>>(sino_d+(slice+FFACTOR)*mynumray,noderecvbuff_d,noderaydispl_d,noderayindex_d,mynumray,nodereduceincdispl[numproc_data],rayunpackmap_d,scale); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); brtime += milliseconds/1e3; //MEMCPY DEVICE TO HOST cudaEventRecord(start); cudaMemcpy(noderecvbuff_h,noderecvbuff_d,sizeof(COMMPREC)*nodereduceincdispl[numproc_data]*FFACTOR,cudaMemcpyDeviceToHost); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); bmtime += milliseconds/1e3; //HOST COMMUNICATION MPI_Barrier(MPI_COMM_DATA); chtime = MPI_Wtime(); for(int p = 0; p < numproc_data; p++) if(nodereduceout[p]){ MPI_Irecv(nodesendbuff_h+nodereduceoutdispl[p]*FFACTOR,nodereduceout[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,sendrequest+sendcount); sendcount++; } for(int p = 0; p < numproc_data; p++) if(nodereduceinc[p]){ MPI_Issend(noderecvbuff_h+nodereduceincdispl[p]*FFACTOR,nodereduceinc[p]*FFACTOR*sizeof(COMMPREC),MPI_BYTE,p,0,MPI_COMM_DATA,recvrequest+recvcount); recvcount++; } } #ifdef OVERLAP //PARTIAL BACKPROJECTION partial_backproject(); kernel_VECPREC2double<<<(mynumpix*FFACTOR+255)/256,256>>>(tomo_d+slice*mynumpix,tomobuff_d,mynumpix*FFACTOR,1.0/scale); #endif if(slice+FFACTOR < batchslice){ MPI_Waitall(sendcount,sendrequest,MPI_STATUSES_IGNORE); MPI_Waitall(recvcount,recvrequest,MPI_STATUSES_IGNORE); MPI_Barrier(MPI_COMM_DATA); bchtime += MPI_Wtime()-chtime; //if(myid==0)printf("rack time %e\n",MPI_Wtime()-chtime); } #ifndef OVERLAP //PARTIAL BACKPROJECTION partial_backproject(); kernel_VECPREC2double<<<(mynumpix*FFACTOR+255)/256,256>>>(tomo_d+slice*mynumpix,tomobuff_d,mynumpix*FFACTOR,1.0/scale); #endif numback++; if(slice+FFACTOR < batchslice){ //MEMCPY HOST TO DEVICE cudaEventRecord(start); cudaMemcpy(nodesendbuff_d,nodesendbuff_h,sizeof(COMMPREC)*nodereduceoutdispl[numproc_data]*FFACTOR,cudaMemcpyHostToDevice); cudaEventRecord(stop); 
cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); bmtime += milliseconds/1e3; } } cudaDeviceSynchronize(); MPI_Barrier(MPI_COMM_DATA); btime += MPI_Wtime()-backprojecttime; } void partial_project(){ cudaEventRecord(start); #ifdef MATRIX kernel_project<<<proj_numblocks,proj_blocksize,sizeof(VECPREC)*proj_buffsize*FFACTOR>>>(partbuff_d,tomobuff_d,proj_warpindval_d,raynumout,mynumpix,proj_buffdispl_d,proj_warpdispl_d,proj_mapdispl_d,proj_mapnz_d,proj_buffmap_d,proj_buffsize); #else kernel_project<<<proj_numblocks,proj_blocksize,sizeof(VECPREC)*proj_buffsize*FFACTOR>>>(partbuff_d,tomobuff_d,proj_warpindex_d,proj_warpvalue_d,raynumout,mynumpix,proj_buffdispl_d,proj_warpdispl_d,proj_mapdispl_d,proj_mapnz_d,proj_buffmap_d,proj_buffsize); #endif cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); //if(myid==0)printf("project %e milliseconds\n",milliseconds); pktime += milliseconds/1e3; //COMMUNICATION BUFFER kernel_VECPREC2COMMPREC<<<(raynumout*FFACTOR+255)/256,256>>>(socketreducesendbuff_d,partbuff_d,raynumout*FFACTOR,socketpackmap_d); cudaDeviceSynchronize(); //SOCKET COMMUNICATION MPI_Barrier(MPI_COMM_SOCKET); double cstime = MPI_Wtime(); for(int psend = 0; psend < numproc_socket; psend++) if(socketsendcomm[psend]) cudaMemcpyPeerAsync(socketrecvbuff_p[psend]+socketrecvbuffdispl_p[psend]*FFACTOR,socketrecvdevice_p[psend],socketreducesendbuff_d+socketsendcommdispl[psend]*FFACTOR,mydevice,sizeof(COMMPREC)*socketsendcomm[psend]*FFACTOR,socketstream[psend]); cudaDeviceSynchronize(); MPI_Barrier(MPI_COMM_SOCKET); pcstime += MPI_Wtime()-cstime; //if(myid==0)printf("socket time %e\n",MPI_Wtime()-cstime); //SOCKET REDUCTION cudaEventRecord(start); kernel_reduce<<<(socketreduceoutdispl[numproc_data]+255)/256,256>>>(nodereducesendbuff_d,socketreducerecvbuff_d,socketreducedispl_d,socketreduceindex_d,socketreduceoutdispl[numproc_data],socketrecvcommdispl[numproc_socket],nodepackmap_d,socketunpackmap_d); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); prtime += milliseconds/1e3; //NODE COMMUNICATION MPI_Barrier(MPI_COMM_NODE); double cntime = MPI_Wtime(); for(int psend = 0; psend < numproc_node; psend++) if(nodesendcomm[psend]) cudaMemcpyPeerAsync(noderecvbuff_p[psend]+noderecvbuffdispl_p[psend]*FFACTOR,noderecvdevice_p[psend],nodereducesendbuff_d+nodesendcommdispl[psend]*FFACTOR,mydevice,sizeof(COMMPREC)*nodesendcomm[psend]*FFACTOR,nodestream[psend]); cudaDeviceSynchronize(); MPI_Barrier(MPI_COMM_NODE); pcntime += MPI_Wtime()-cntime; //if(myid==0)printf("node time %e\n",MPI_Wtime()-cntime); //NODE REDUCTION cudaEventRecord(start); kernel_reduce<<<(nodereduceoutdispl[numproc_data]+255)/256,256>>>(nodesendbuff_d,nodereducerecvbuff_d,nodereducedispl_d,nodereduceindex_d,nodereduceoutdispl[numproc_data],noderecvcommdispl[numproc_node],raypackmap_d,nodeunpackmap_d); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); prtime += milliseconds/1e3; }; void partial_backproject(){ //NODE SCATTER cudaEventRecord(start); kernel_scatter<<<(nodereduceoutdispl[numproc_data]+255)/256,256>>>(nodesendbuff_d,nodereducerecvbuff_d,nodereducedispl_d,nodereduceindex_d,nodereduceoutdispl[numproc_data],noderecvcommdispl[numproc_node],raypackmap_d,nodeunpackmap_d); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); brtime += milliseconds/1e3; //NODE COMMUNICATION MPI_Barrier(MPI_COMM_NODE); double cntime = MPI_Wtime(); 
for(int psend = 0; psend < numproc_node; psend++) if(nodesendcomm[psend]) cudaMemcpyPeerAsync(nodereducesendbuff_d+nodesendcommdispl[psend]*FFACTOR,mydevice,noderecvbuff_p[psend]+noderecvbuffdispl_p[psend]*FFACTOR,noderecvdevice_p[psend],sizeof(COMMPREC)*nodesendcomm[psend]*FFACTOR,nodestream[psend]); cudaDeviceSynchronize(); MPI_Barrier(MPI_COMM_NODE); bcntime += MPI_Wtime()-cntime; //if(myid==0)printf("node time %e\n",MPI_Wtime()-cntime); //SOCKET SCATTER cudaEventRecord(start); kernel_scatter<<<(socketreduceoutdispl[numproc_data]+255)/256,256>>>(nodereducesendbuff_d,socketreducerecvbuff_d,socketreducedispl_d,socketreduceindex_d,socketreduceoutdispl[numproc_data],socketrecvcommdispl[numproc_socket],nodepackmap_d,socketunpackmap_d); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); brtime += milliseconds/1e3; //SOCKET COMMUNICATION MPI_Barrier(MPI_COMM_SOCKET); double cstime = MPI_Wtime(); for(int psend = 0; psend < numproc_socket; psend++) if(socketsendcomm[psend]) cudaMemcpyPeerAsync(socketreducesendbuff_d+socketsendcommdispl[psend]*FFACTOR,mydevice,socketrecvbuff_p[psend]+socketrecvbuffdispl_p[psend]*FFACTOR,socketrecvdevice_p[psend],sizeof(COMMPREC)*socketsendcomm[psend]*FFACTOR,socketstream[psend]); cudaDeviceSynchronize(); MPI_Barrier(MPI_COMM_SOCKET); bcstime += MPI_Wtime()-cstime; //if(myid==0)printf("socket time %e\n",MPI_Wtime()-cstime); //BACKPROJECTION kernel_COMMPREC2VECPREC<<<(raynumout*FFACTOR+255)/256,256>>>(partbuff_d,socketreducesendbuff_d,raynumout*FFACTOR,socketpackmap_d); cudaEventRecord(start); #ifdef MATRIX kernel_project<<<back_numblocks,back_blocksize,sizeof(VECPREC)*back_buffsize*FFACTOR>>>(tomobuff_d,partbuff_d,back_warpindval_d,mynumpix,raynumout,back_buffdispl_d,back_warpdispl_d,back_mapdispl_d,back_mapnz_d,back_buffmap_d,back_buffsize); #else kernel_project<<<back_numblocks,back_blocksize,sizeof(VECPREC)*back_buffsize*FFACTOR>>>(tomobuff_d,partbuff_d,back_warpindex_d,back_warpvalue_d,mynumpix,raynumout,back_buffdispl_d,back_warpdispl_d,back_mapdispl_d,back_mapnz_d,back_buffmap_d,back_buffsize); #endif cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds,start,stop); //if(myid==0)printf("backproject %e milliseconds\n",milliseconds); bktime += milliseconds/1e3; }; __global__ void kernel_reduce(COMMPREC *y, COMMPREC *x, int *displ, int *index, int numrow, int numcol, int *packmap, int *unpackmap){ int row = blockIdx.x*blockDim.x+threadIdx.x; #ifdef MIXED float reduce[FFACTOR] = {0.0}; #else VECPREC reduce[FFACTOR] = {0.0}; #endif if(row < numrow){ for(int n = displ[row]; n < displ[row+1]; n++){ int ind = index[n]; for(int f = 0; f < FFACTOR; f++) #ifdef MIXED reduce[f] += __half2float(x[unpackmap[f*numcol+ind]]); #else reduce[f] += x[unpackmap[f*numcol+ind]]; #endif } for(int f = 0; f < FFACTOR; f++) y[packmap[f*numrow+row]] = reduce[f]; } }; __global__ void kernel_reducenopack(double *y, COMMPREC *x, int *displ, int *index, int numrow, int numcol, int *unpackmap, double scale){ int row = blockIdx.x*blockDim.x+threadIdx.x; #ifdef MIXED float reduce[FFACTOR] = {0.0}; #else VECPREC reduce[FFACTOR] = {0.0}; #endif if(row < numrow){ for(int n = displ[row]; n < displ[row+1]; n++){ int ind = index[n]; for(int f = 0; f < FFACTOR; f++) #ifdef MIXED reduce[f] += __half2float(x[unpackmap[f*numcol+ind]]); #else reduce[f] += x[unpackmap[f*numcol+ind]]; #endif } for(int f = 0; f < FFACTOR; f++) y[f*numrow+row] = (double)reduce[f]*scale; } }; __global__ void kernel_scatternopack(double *y, 
COMMPREC *x, int *displ, int *index, int numrow, int numcol, int *unpackmap, double scale){ int row = blockIdx.x*blockDim.x+threadIdx.x; VECPREC scatter[FFACTOR] = {0.0}; if(row < numrow){ for(int f = 0; f < FFACTOR; f++) scatter[f] = y[f*numrow+row]*scale; for(int n = displ[row]; n < displ[row+1]; n++){ int ind = index[n]; for(int f = 0; f < FFACTOR; f++) x[unpackmap[f*numcol+ind]] = scatter[f]; } } }; __global__ void kernel_scatter(COMMPREC *y, COMMPREC *x, int *displ, int *index, int numrow, int numcol, int *packmap, int *unpackmap){ int row = blockIdx.x*blockDim.x+threadIdx.x; VECPREC scatter[FFACTOR] = {0.0}; if(row < numrow){ for(int f = 0; f < FFACTOR; f++) scatter[f] = y[packmap[f*numrow+row]]; for(int n = displ[row]; n < displ[row+1]; n++){ int ind = index[n]; for(int f = 0; f < FFACTOR; f++) x[unpackmap[f*numcol+ind]] = scatter[f]; } } }; __global__ void kernel_double2VECPREC(VECPREC *y, double *x,int dim, double scale){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) y[tid] = x[tid]*scale; }; __global__ void kernel_VECPREC2double(double *y, VECPREC *x,int dim, double scale){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) y[tid] = (double)x[tid]*scale; }; __global__ void kernel_VECPREC2COMMPREC(COMMPREC *y, VECPREC *x,int dim, int *packmap){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) y[packmap[tid]] = x[tid]; }; __global__ void kernel_COMMPREC2VECPREC(VECPREC *y, COMMPREC *x,int dim, int *unpackmap){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) y[tid] = x[unpackmap[tid]]; }; void copyD2D_kernel(double *a, double *b, int dim){ cudaMemcpy(a,b,sizeof(double)*dim,cudaMemcpyDeviceToDevice); }; void copyD2H_kernel(double *a, double *b, int dim){ cudaMemcpy(a,b,sizeof(double)*dim,cudaMemcpyDeviceToHost); }; void copyH2D_kernel(double *a, double *b, int dim){ cudaMemcpy(a,b,sizeof(double)*dim,cudaMemcpyHostToDevice); }; void init_kernel(double *a, int dim){ cudaMemset(a,0,sizeof(double)*dim); }; __global__ void kernel_saxpy(double *a, double *b, double coef, double *c, int dim){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) a[tid] = b[tid] + coef*c[tid]; }; void saxpy_kernel(double *a, double *b, double coef, double *c, int dim){ kernel_saxpy<<<(dim+255)/256,256>>>(a,b,coef,c,dim); }; __global__ void kernel_scale(double *a, double coef, int dim){ int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) a[tid] = coef*a[tid]; }; void scale_kernel(double *a, double coef, int dim){ kernel_scale<<<(dim+255)/256,256>>>(a,coef,dim); }; __global__ void kernel_dot(double *a, double *b, int dim, double *buffer){ extern __shared__ double temp[]; int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) temp[threadIdx.x] = a[tid]*b[tid]; else temp[threadIdx.x] = 0; for(int stride = blockDim.x/2; stride > 0; stride>>=1){ __syncthreads(); if(threadIdx.x < stride) temp[threadIdx.x] += temp[threadIdx.x+stride]; } if(threadIdx.x==0) buffer[blockIdx.x] = temp[0]; }; double dot_kernel(double *a, double *b, int dim){ int numblocks = (dim+255)/256; kernel_dot<<<numblocks,256,sizeof(double)*256>>>(a,b,dim,reducebuff_d); cudaMemcpy(reducebuff_h,reducebuff_d,sizeof(double)*numblocks,cudaMemcpyDeviceToHost); double reduce = 0.0; for(int n = 0; n < numblocks; n++) reduce += reducebuff_h[n]; MPI_Allreduce(MPI_IN_PLACE,&reduce,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_DATA); return reduce; }; __global__ void kernel_max(double *a, int dim, double *buffer){ extern __shared__ double temp[]; int tid = blockIdx.x*blockDim.x+threadIdx.x; if(tid < dim) 
temp[threadIdx.x] = a[tid]; else temp[threadIdx.x] = 0.0; for(int stride = blockDim.x/2; stride > 0; stride>>=1){ __syncthreads(); if(threadIdx.x < stride) if(temp[threadIdx.x+stride] > temp[threadIdx.x]) temp[threadIdx.x] = temp[threadIdx.x+stride]; } if(threadIdx.x==0) buffer[blockIdx.x] = temp[0]; }; double max_kernel(double *a, int dim){ int numblocks = (dim+255)/256; kernel_max<<<numblocks,256,sizeof(double)*256>>>(a,dim,reducebuff_d); cudaMemcpy(reducebuff_h,reducebuff_d,sizeof(double)*numblocks,cudaMemcpyDeviceToHost); double reduce = 0.0; for(int n = 0; n < numblocks; n++) if(reducebuff_h[n] > reduce) reduce = reducebuff_h[n]; MPI_Allreduce(MPI_IN_PLACE,&reduce,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_DATA); return reduce; };
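/***************************************************************************
 * Illustrative sketch (not part of the file above): dot_kernel above reduces
 * in three stages -- a shared-memory tree reduction per 256-thread block into
 * reducebuff_d, a serial sum of the per-block partials on the host, and an
 * MPI_Allreduce of the resulting scalar over MPI_COMM_DATA. The standalone
 * program below (hypothetical names such as blockDot; MPI stage omitted)
 * reproduces only the single-GPU part of that pattern so the reduction can be
 * tested in isolation, e.g. with: nvcc -o dot_sketch dot_sketch.cu
 ***************************************************************************/
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void blockDot(const double* a, const double* b, double* partial, int n) {
    __shared__ double cache[256];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    cache[threadIdx.x] = (tid < n) ? a[tid] * b[tid] : 0.0;   // one element per thread
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {            // tree reduction in shared memory
        __syncthreads();
        if (threadIdx.x < s) cache[threadIdx.x] += cache[threadIdx.x + s];
    }
    if (threadIdx.x == 0) partial[blockIdx.x] = cache[0];     // one partial sum per block
}

int main() {
    const int n = 1 << 20;
    const int blocks = (n + 255) / 256;                       // same launch shape as dot_kernel
    std::vector<double> h_a(n, 0.5), h_b(n, 2.0), h_part(blocks);
    double *d_a, *d_b, *d_part;
    cudaMalloc(&d_a, n * sizeof(double));
    cudaMalloc(&d_b, n * sizeof(double));
    cudaMalloc(&d_part, blocks * sizeof(double));
    cudaMemcpy(d_a, h_a.data(), n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b.data(), n * sizeof(double), cudaMemcpyHostToDevice);
    blockDot<<<blocks, 256>>>(d_a, d_b, d_part, n);
    cudaMemcpy(h_part.data(), d_part, blocks * sizeof(double), cudaMemcpyDeviceToHost);
    double dot = 0.0;
    for (int i = 0; i < blocks; i++) dot += h_part[i];        // host stage; the file above would
    printf("dot = %.1f (expected %.1f)\n", dot, (double)n);   // MPI_Allreduce this scalar next
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_part);
    return 0;
}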
e89de315a1cae098bb5d016b5a40384196071e3b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2022 Mohamed Khaled <[email protected]> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "cuda/vector_helpers.cuh" extern "C" { /** * @brief function contains the main logic of chroma keying, and changes the alpahc channel with the suitable value * * @param src_tex texture U or texture UV , decided based on the passed is_uchar2 flag * @param src_tex_V texture V , used only if is_uchar2 flag is false * @param dst_A alpha channel destination * @param width_uv width of uv channels * @param height_uv height of uv channels * @param width width of alpha channel * @param height height of alpha channel * @param pitch pitch of alpha channel * @param x current x coordinate of pixel * @param y current y coordinate of pixel * @param chromakey_uv uv values for chroma keying * @param similarity similarity of keying * @param blend blend of keying */ __device__ static inline void change_alpha_channel( hipTextureObject_t src_tex, hipTextureObject_t src_tex_V, uchar *dst_A, int width_uv, int height_uv, int width, int height, int pitch, int x, int y, float2 chromakey_uv, float similarity, float blend) { int window_size = 3; int start_r = x - window_size / 2; int start_c = y - window_size / 2; int resize_ratio = width / width_uv; int counter = 0; float diff = 0.0f; float du, dv; uchar alpha_value; // loop over the eight neighbourhood of the current pixel(x,y) for (uchar i = 0; i < window_size; i++) { for (uchar j = 0; j < window_size; j++) { float u_value, v_value; int r = start_r + i; int c = start_c + j; if (r < 0 || r >= width_uv || c < 0 || c >= height_uv) continue; if (!src_tex_V) { float2 temp_uv = tex2D<float2>(src_tex, r, c); u_value = temp_uv.x; v_value = temp_uv.y; } else { u_value = tex2D<float>(src_tex, r, c); v_value = tex2D<float>(src_tex_V, r, c); } du = (u_value * 255.0f) - chromakey_uv.x; dv = (v_value * 255.0f) - chromakey_uv.y; diff += sqrtf((du * du + dv * dv) / (255.0f * 255.0f * 2.f)); counter++; } } if (counter > 0) diff = diff / counter; else diff /= 9.0f; if (blend>0.0001f) alpha_value = __saturatef((diff - similarity) / blend) * 255; else alpha_value = (diff < similarity) ? 
0 : 255; //write the value in the alpha channel with regarding the ratio of (alpha_size : uv_size) for (uchar k = 0; k < resize_ratio; k++) { for (uchar l = 0; l < resize_ratio; l++) { int x_resize = x * resize_ratio + k; int y_resize = y * resize_ratio + l; int a_channel_resize = y_resize * pitch + x_resize; if (y_resize >= height || x_resize >= width) continue; dst_A[a_channel_resize] = alpha_value; } } } __global__ void Process_uchar( hipTextureObject_t src_tex_Y, hipTextureObject_t src_tex_U, hipTextureObject_t src_tex_V, uchar *dst_Y, uchar *dst_U, uchar *dst_V, uchar *dst_A, int width, int height, int pitch, int width_uv, int height_uv, int pitch_uv, float u_key, float v_key, float similarity, float blend) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= height || x >= width) return; dst_Y[y * pitch + x] = tex2D<float>(src_tex_Y, x, y)*255; if (y >= height_uv || x >= width_uv) return; int uv_index = y * pitch_uv + x; dst_U[uv_index] = tex2D<float>(src_tex_U, x, y) * 255; dst_V[uv_index] = tex2D<float>(src_tex_V, x, y) * 255; change_alpha_channel(src_tex_U, src_tex_V, dst_A, width_uv, height_uv, width, height, pitch, x, y, make_float2(u_key, v_key), similarity, blend); } __global__ void Process_uchar2( hipTextureObject_t src_tex_Y, hipTextureObject_t src_tex_UV, hipTextureObject_t unused1, uchar *dst_Y, uchar *dst_U, uchar *dst_V, uchar *dst_A, int width, int height, int pitch, int width_uv, int height_uv,int pitch_uv, float u_key, float v_key, float similarity, float blend) { int x = blockIdx.x * blockDim.x + threadIdx.x; // x coordinate of current pixel int y = blockIdx.y * blockDim.y + threadIdx.y; // y coordinate of current pixel if (y >= height || x >= width) return; dst_Y[y * pitch + x] = tex2D<float>(src_tex_Y, x, y) * 255; if (y >= height_uv || x >= width_uv) return; int uv_index = y * pitch_uv + x; float2 uv_temp = tex2D<float2>(src_tex_UV, x, y); dst_U[uv_index] = uv_temp.x * 255; dst_V[uv_index] = uv_temp.y * 255; change_alpha_channel(src_tex_UV, (hipTextureObject_t)nullptr, dst_A, width_uv, height_uv, width, height, pitch, x, y, make_float2(u_key, v_key), similarity, blend); } }
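/***************************************************************************
 * Illustrative sketch (not part of the FFmpeg file above): the alpha decision
 * in change_alpha_channel reduces to "average the normalized UV distance to
 * the key over a 3x3 window, then apply either a hard threshold (blend ~ 0)
 * or a linear ramp of width `blend` starting at `similarity`". The host-side
 * helper below uses hypothetical names and shows only that arithmetic for one
 * already-averaged distance value, so it can be checked without textures.
 ***************************************************************************/
#include <cstdio>
#include <cmath>
#include <algorithm>

// diff: mean of sqrt((du^2+dv^2)/(255^2*2)) over the window, as computed above
static unsigned char alpha_from_diff(float diff, float similarity, float blend) {
    if (blend > 0.0001f) {
        float a = (diff - similarity) / blend;        // linear ramp
        a = std::min(1.0f, std::max(0.0f, a));        // same clamp as __saturatef
        return (unsigned char)(a * 255.0f);
    }
    return (diff < similarity) ? 0 : 255;             // hard keying
}

int main() {
    // a pixel whose U is 20/255 away from the key, with similarity 0.05 and blend 0.10
    float du = 20.0f, dv = 0.0f;
    float diff = std::sqrt((du * du + dv * dv) / (255.0f * 255.0f * 2.0f));
    printf("diff=%.4f alpha=%d\n", diff, alpha_from_diff(diff, 0.05f, 0.10f));
    return 0;
}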
e89de315a1cae098bb5d016b5a40384196071e3b.cu
/* * Copyright (c) 2022 Mohamed Khaled <[email protected]> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "cuda/vector_helpers.cuh" extern "C" { /** * @brief function contains the main logic of chroma keying, and changes the alpahc channel with the suitable value * * @param src_tex texture U or texture UV , decided based on the passed is_uchar2 flag * @param src_tex_V texture V , used only if is_uchar2 flag is false * @param dst_A alpha channel destination * @param width_uv width of uv channels * @param height_uv height of uv channels * @param width width of alpha channel * @param height height of alpha channel * @param pitch pitch of alpha channel * @param x current x coordinate of pixel * @param y current y coordinate of pixel * @param chromakey_uv uv values for chroma keying * @param similarity similarity of keying * @param blend blend of keying */ __device__ static inline void change_alpha_channel( cudaTextureObject_t src_tex, cudaTextureObject_t src_tex_V, uchar *dst_A, int width_uv, int height_uv, int width, int height, int pitch, int x, int y, float2 chromakey_uv, float similarity, float blend) { int window_size = 3; int start_r = x - window_size / 2; int start_c = y - window_size / 2; int resize_ratio = width / width_uv; int counter = 0; float diff = 0.0f; float du, dv; uchar alpha_value; // loop over the eight neighbourhood of the current pixel(x,y) for (uchar i = 0; i < window_size; i++) { for (uchar j = 0; j < window_size; j++) { float u_value, v_value; int r = start_r + i; int c = start_c + j; if (r < 0 || r >= width_uv || c < 0 || c >= height_uv) continue; if (!src_tex_V) { float2 temp_uv = tex2D<float2>(src_tex, r, c); u_value = temp_uv.x; v_value = temp_uv.y; } else { u_value = tex2D<float>(src_tex, r, c); v_value = tex2D<float>(src_tex_V, r, c); } du = (u_value * 255.0f) - chromakey_uv.x; dv = (v_value * 255.0f) - chromakey_uv.y; diff += sqrtf((du * du + dv * dv) / (255.0f * 255.0f * 2.f)); counter++; } } if (counter > 0) diff = diff / counter; else diff /= 9.0f; if (blend>0.0001f) alpha_value = __saturatef((diff - similarity) / blend) * 255; else alpha_value = (diff < similarity) ? 
0 : 255; //write the value in the alpha channel with regarding the ratio of (alpha_size : uv_size) for (uchar k = 0; k < resize_ratio; k++) { for (uchar l = 0; l < resize_ratio; l++) { int x_resize = x * resize_ratio + k; int y_resize = y * resize_ratio + l; int a_channel_resize = y_resize * pitch + x_resize; if (y_resize >= height || x_resize >= width) continue; dst_A[a_channel_resize] = alpha_value; } } } __global__ void Process_uchar( cudaTextureObject_t src_tex_Y, cudaTextureObject_t src_tex_U, cudaTextureObject_t src_tex_V, uchar *dst_Y, uchar *dst_U, uchar *dst_V, uchar *dst_A, int width, int height, int pitch, int width_uv, int height_uv, int pitch_uv, float u_key, float v_key, float similarity, float blend) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= height || x >= width) return; dst_Y[y * pitch + x] = tex2D<float>(src_tex_Y, x, y)*255; if (y >= height_uv || x >= width_uv) return; int uv_index = y * pitch_uv + x; dst_U[uv_index] = tex2D<float>(src_tex_U, x, y) * 255; dst_V[uv_index] = tex2D<float>(src_tex_V, x, y) * 255; change_alpha_channel(src_tex_U, src_tex_V, dst_A, width_uv, height_uv, width, height, pitch, x, y, make_float2(u_key, v_key), similarity, blend); } __global__ void Process_uchar2( cudaTextureObject_t src_tex_Y, cudaTextureObject_t src_tex_UV, cudaTextureObject_t unused1, uchar *dst_Y, uchar *dst_U, uchar *dst_V, uchar *dst_A, int width, int height, int pitch, int width_uv, int height_uv,int pitch_uv, float u_key, float v_key, float similarity, float blend) { int x = blockIdx.x * blockDim.x + threadIdx.x; // x coordinate of current pixel int y = blockIdx.y * blockDim.y + threadIdx.y; // y coordinate of current pixel if (y >= height || x >= width) return; dst_Y[y * pitch + x] = tex2D<float>(src_tex_Y, x, y) * 255; if (y >= height_uv || x >= width_uv) return; int uv_index = y * pitch_uv + x; float2 uv_temp = tex2D<float2>(src_tex_UV, x, y); dst_U[uv_index] = uv_temp.x * 255; dst_V[uv_index] = uv_temp.y * 255; change_alpha_channel(src_tex_UV, (cudaTextureObject_t)nullptr, dst_A, width_uv, height_uv, width, height, pitch, x, y, make_float2(u_key, v_key), similarity, blend); } }
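/***************************************************************************
 * Illustrative sketch (not part of the FFmpeg file above): the alpha value is
 * computed once per UV sample, so change_alpha_channel replicates it into a
 * resize_ratio x resize_ratio block of the full-resolution alpha plane
 * (resize_ratio = width/width_uv, e.g. 2 for NV12/YUV420). The host-side loop
 * below, with hypothetical names, mirrors that index arithmetic so the
 * addressing can be sanity-checked on the CPU.
 ***************************************************************************/
#include <cstdio>
#include <vector>

static void replicate_alpha(std::vector<unsigned char>& A, int width, int height,
                            int pitch, int width_uv, int x, int y, unsigned char alpha) {
    int r = width / width_uv;                   // same resize_ratio as above
    for (int k = 0; k < r; k++)
        for (int l = 0; l < r; l++) {
            int xr = x * r + k, yr = y * r + l;
            if (xr >= width || yr >= height) continue;
            A[yr * pitch + xr] = alpha;         // same index as a_channel_resize
        }
}

int main() {
    int width = 8, height = 6, pitch = 8, width_uv = 4;
    std::vector<unsigned char> A(pitch * height, 0);
    replicate_alpha(A, width, height, pitch, width_uv, /*x=*/1, /*y=*/2, 255);
    // UV sample (1,2) should fill luma pixels x=2..3, y=4..5
    printf("A[4*8+2]=%d A[5*8+3]=%d\n", A[4 * 8 + 2], A[5 * 8 + 3]);
    return 0;
}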
63e7984007f09490df9835896ab042e8908c8034.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "solve.h" /*number of threads in a block*/ static const Int nThreads = 128; /*Matrix vector multiply*/ template <class T> __global__ void cudaMul(const Int* const rows, const Int* const cols, const Scalar* const an, const Int N, const T* const x, T* y ) { Int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { const Int start = rows[i]; const Int end = rows[i + 1]; T res = an[start] * x[cols[start]]; for (Int j = start + 1; j < end; j++) res -= an[j] * x[cols[j]]; y[i] = res; } } /*jacobi solver*/ template<class T> __global__ void cudaJacobi(const Int* const rows, const Int* const cols, const Scalar* const an, const T* const cF, T* const cF1, const T* const Su, T* r, const Int N, Scalar omega ) { Int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { const Int start = rows[i]; const Int end = rows[i + 1]; T res = Su[i], val = cF[i]; for (Int j = start + 1; j < end; j++) res += an[j] * cF[cols[j]]; res /= an[start]; r[i] = -val; val *= (1 - omega); val += res * (omega); r[i] += val; cF1[i] = val; } } /*Taxpy*/ template<class T,class T1> __global__ void cudaTaxpy(const Int N, const T1 alpha, const T* const x, const T* const y, T* const z ) { Int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { T temp; temp = x[i]; temp *= alpha; temp += y[i]; z[i] = temp; } } /*Txmy*/ template<class T,class T1> __global__ void cudaTxmy(const Int N, const T* const x, const T1* const y, T* const z ) { Int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { T temp; temp = x[i]; temp *= y[i]; z[i] = temp; } } /*Tdot*/ template <class T> __global__ void Tdot(const T* const a, const T* const b, T* const c, const Int N ) { __shared__ T cache[nThreads]; Int tid = threadIdx.x + blockIdx.x * blockDim.x; Int cacheIndex = threadIdx.x; T temp = T(0),val; while (tid < N) { val = a[tid]; val *= b[tid]; temp += val; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); Int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } template<class T> __host__ T cudaTdot(T* x, T* y, T* d_sum, T* sum, const Int nBlocks32, const Int N ) { hipLaunchKernelGGL(( Tdot) , dim3(nBlocks32), dim3(nThreads) , 0, 0, x,y,d_sum,N); hipMemcpy(sum,d_sum,nBlocks32 * sizeof(T),hipMemcpyDeviceToHost); T c = T(0); for (Int i = 0; i < nBlocks32; i++) c += sum[i]; return c; } /* ************************************** * CSR - compressed sparse row format * * Used for on GPU computation * * Propably for AMG too * **************************************/ template <class T> class CSRMatrix { public: std::vector<Int> rows; std::vector<Int> cols; std::vector<Scalar> an; std::vector<Scalar> anT; std::vector<T> cF; std::vector<T> Su; public: template <class T1> CSRMatrix(const MeshMatrix<T1>& A) { using namespace Mesh; using namespace DG; const Int N = A.ap.size(); const Int NN = A.ap.size() + A.an[0].size() + A.an[1].size() + (NPMAT ? 
(A.adg.size() - A.ap.size()) : 0); register Int i,ii,f; /*resize*/ cF.resize(N); Su.resize(N); rows.reserve(N + 1); cols.reserve(NN); an.reserve(NN); anT.reserve(NN); /*source term*/ for(i = 0;i < N;i++) { Su[i] = A.Su[i]; cF[i] = (*A.cF)[i]; } /*fill matrix in CSR format. Diagonal element is always at the start of a row */ Int cn = 0; for(ii = 0;ii < gCells.size();ii++) { Cell& c = gCells[ii]; for(Int j = 0;j < NP;j++) { Int i = ii * NP + j; rows.push_back(cn); an.push_back(A.ap[i]); anT.push_back(A.ap[i]); cols.push_back(i); cn++; forEach(c,k) { f = c[k]; if(i == gFO[f]) { an.push_back(A.an[1][f]); anT.push_back(A.an[0][f]); cols.push_back(gFN[f]); cn++; } else { an.push_back(A.an[0][f]); anT.push_back(A.an[1][f]); cols.push_back(gFO[f]); cn++; } } if(NPMAT) { for(Int k = 0;k < NP;k++) { if(k == j) continue; an.push_back(A.adg[ii * NPMAT + j * NP + k]); anT.push_back(A.adg[ii * NPMAT + k * NP + j]); cols.push_back(ii * NP + k); /*column of the (j,k) DG block entry (assumed)*/ cn++; } } } } /*push extra row*/ rows.push_back(cn); } /*IO*/ friend std::ostream& operator << (std::ostream& os, const CSRMatrix& p) { os << p.rows << std::endl; os << p.cols << std::endl; os << p.an << std::endl; os << p.Su << std::endl; return os; } friend std::istream& operator >> (std::istream& is, CSRMatrix& p) { is >> p.rows; is >> p.cols; is >> p.an; is >> p.Su; return is; } /*end*/ }; /*********************************************** * Template class to solve equations on GPU * Solver must do many iterations to compensate * for the latency caused by copying matrix * from host to device. ***********************************************/ template<class T> __host__ void SolveT(const MeshMatrix<T>& M) { const Int N = Mesh::gBCSfield; const Int Nall = M.ap.size(); const Int nBlocks = (N + nThreads - 1) / nThreads; const Int nBlocks32 = ((nBlocks > 32) ?
32 : nBlocks); //info if(M.flags & M.SYMMETRIC) MP::printH("Symmetric : "); else MP::printH("Asymmetric : "); if(Controls::Solver == Controls::SOR) MP::print("SOR :"); else MP::print("PCG :"); /******************************* * variables on host & device *******************************/ Int* d_rows; Int* d_cols; Scalar* d_an; Scalar* d_anT; Scalar* d_pC; T* d_cF; T* d_Su; //PCG T* d_r,*d_r1; T* d_p,*d_p1,*d_AP,*d_AP1; T alpha,beta,o_rr,oo_rr; T local_res[2]; //reduction T* sum,*d_sum; /********************************* * allocate memory on device ********************************/ { CSRMatrix<T> A(M); hipMalloc((void**) &d_rows,A.rows.size() * sizeof(Int)); hipMalloc((void**) &d_cols,A.cols.size() * sizeof(Int)); hipMalloc((void**) &d_an, A.an.size() * sizeof(Scalar)); hipMalloc((void**) &d_cF, Nall * sizeof(T)); hipMalloc((void**) &d_Su, Nall * sizeof(T)); hipMemcpy(d_rows ,&A.rows[0] ,A.rows.size() * sizeof(Int), hipMemcpyHostToDevice); hipMemcpy(d_cols ,&A.cols[0] ,A.cols.size() * sizeof(Int), hipMemcpyHostToDevice); hipMemcpy(d_an ,&A.an[0] ,A.an.size() * sizeof(Scalar), hipMemcpyHostToDevice); hipMemcpy(d_cF ,&A.cF[0] ,Nall * sizeof(T), hipMemcpyHostToDevice); hipMemcpy(d_Su ,&A.Su[0] ,Nall * sizeof(T), hipMemcpyHostToDevice); hipMalloc((void**) &d_r, Nall * sizeof(T)); hipMalloc((void**) &d_sum, nBlocks32 * sizeof(T)); sum = (T*) malloc(nBlocks32 * sizeof(T)); if(Controls::Solver == Controls::SOR) { hipMalloc((void**) &d_AP,Nall * sizeof(T)); hipMemcpy( d_AP,d_cF,Nall * sizeof(T),hipMemcpyDeviceToDevice); } else if(Controls::Solver == Controls::PCG) { hipMalloc((void**) &d_p, Nall * sizeof(T)); hipMalloc((void**) &d_AP, Nall * sizeof(T)); { ScalarCellField pC = 1./M.ap; hipMalloc((void**) &d_pC,N * sizeof(Scalar)); hipMemcpy(d_pC,&pC[0],N * sizeof(Scalar),hipMemcpyHostToDevice); } if(!(M.flags & M.SYMMETRIC)) { hipMalloc((void**) &d_r1, Nall * sizeof(T)); hipMalloc((void**) &d_p1, Nall * sizeof(T)); hipMalloc((void**) &d_AP1, Nall * sizeof(T)); hipMalloc((void**) &d_anT,A.anT.size() * sizeof(Scalar)); hipMemcpy(d_anT,&A.anT[0],A.anT.size() * sizeof(Scalar), hipMemcpyHostToDevice); } } } /*CG*/ if(Controls::Solver == Controls::PCG) { hipMemset(d_r,0,Nall * sizeof(T)); hipMemset(d_p,0,Nall * sizeof(T)); hipLaunchKernelGGL(( cudaMul) , dim3(nBlocks), dim3(nThreads) , 0, 0, d_rows,d_cols,d_an,N,d_cF,d_AP); hipLaunchKernelGGL(( cudaTaxpy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,Scalar(-1),d_AP,d_Su,d_r); hipLaunchKernelGGL(( cudaTxmy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,d_r,d_pC,d_p); o_rr = cudaTdot(d_r,d_p,d_sum,sum,nBlocks32,N); } /*BiCG*/ if(!(M.flags & M.SYMMETRIC) && (Controls::Solver == Controls::PCG)) { hipMemcpy(d_r1,d_r,Nall * sizeof(T), hipMemcpyDeviceToDevice); hipMemcpy(d_p1,d_p,Nall * sizeof(T), hipMemcpyDeviceToDevice); } //iterate until convergence Scalar res = 0; Int iterations = 0; /* ************************** * Iterative solvers * *************************/ while(iterations < Controls::max_iterations) { /*counter*/ iterations++; /*select solver*/ if(Controls::Solver == Controls::SOR) { iterations++; hipLaunchKernelGGL(( cudaJacobi) , dim3(nBlocks), dim3(nThreads) , 0, 0, d_rows,d_cols,d_an,d_cF,d_AP,d_Su,d_r,N,Controls::SOR_omega); hipLaunchKernelGGL(( cudaJacobi) , dim3(nBlocks), dim3(nThreads) , 0, 0, d_rows,d_cols,d_an,d_AP,d_cF,d_Su,d_r,N,Controls::SOR_omega); } else if(M.flags & M.SYMMETRIC) { /*conjugate gradient : from wiki*/ hipLaunchKernelGGL(( cudaMul) , dim3(nBlocks), dim3(nThreads) , 0, 0, d_rows,d_cols,d_an,N,d_p,d_AP); oo_rr = 
cudaTdot(d_p,d_AP,d_sum,sum,nBlocks32,N); alpha = sdiv(o_rr , oo_rr); hipLaunchKernelGGL(( cudaTaxpy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,alpha,d_p,d_cF,d_cF); hipLaunchKernelGGL(( cudaTaxpy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,-alpha,d_AP,d_r,d_r); oo_rr = o_rr; hipLaunchKernelGGL(( cudaTxmy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,d_r,d_pC,d_AP); o_rr = cudaTdot(d_r,d_AP,d_sum,sum,nBlocks32,N); beta = sdiv(o_rr , oo_rr); hipLaunchKernelGGL(( cudaTaxpy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,beta,d_p,d_AP,d_p); /*end*/ } else { /* biconjugate gradient : from wiki */ hipLaunchKernelGGL(( cudaMul) , dim3(nBlocks), dim3(nThreads) , 0, 0, d_rows,d_cols,d_an,N,d_p,d_AP); hipLaunchKernelGGL(( cudaMul) , dim3(nBlocks), dim3(nThreads) , 0, 0, d_rows,d_cols,d_anT,N,d_p1,d_AP1); oo_rr = cudaTdot(d_p1,d_AP,d_sum,sum,nBlocks32,N); alpha = sdiv(o_rr , oo_rr); hipLaunchKernelGGL(( cudaTaxpy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,alpha,d_p,d_cF,d_cF); hipLaunchKernelGGL(( cudaTaxpy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,-alpha,d_AP,d_r,d_r); hipLaunchKernelGGL(( cudaTaxpy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,-alpha,d_AP1,d_r1,d_r1); oo_rr = o_rr; hipLaunchKernelGGL(( cudaTxmy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,d_r,d_pC,d_AP); hipLaunchKernelGGL(( cudaTxmy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,d_r1,d_pC,d_AP1); o_rr = cudaTdot(d_r1,d_AP,d_sum,sum,nBlocks32,N); beta = sdiv(o_rr , oo_rr); hipLaunchKernelGGL(( cudaTaxpy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,beta,d_p,d_AP,d_p); hipLaunchKernelGGL(( cudaTaxpy) , dim3(nBlocks), dim3(nThreads) , 0, 0, N,beta,d_p1,d_AP1,d_p1); } /* ********************************************* * calculate norm of residual & check convergence * **********************************************/ local_res[0] = cudaTdot(d_r,d_r,d_sum,sum,nBlocks32,N); local_res[1] = cudaTdot(d_cF,d_cF,d_sum,sum,nBlocks32,N); res = sqrt(mag(local_res[0]) / mag(local_res[1])); /*check convergence*/ if(res <= Controls::tolerance) break; } /***************************** * Copy result back to cpu *****************************/ //copy result hipMemcpy(&((*M.cF)[0]), d_cF, N * sizeof(T), hipMemcpyDeviceToHost); //update boundary conditions updateExplicitBCs(*M.cF); //info MP::print("Iterations %d Residue: %.5e\n",iterations,res); /********************************* * free device memory ********************************/ { hipFree(d_rows); hipFree(d_cols); hipFree(d_an); hipFree(d_cF); hipFree(d_Su); hipFree(d_r); hipFree(d_sum); free(sum); if(Controls::Solver == Controls::SOR) { hipFree(d_AP); } else if(Controls::Solver == Controls::PCG) { hipFree(d_p); hipFree(d_AP); hipFree(d_pC); if(!(M.flags & M.SYMMETRIC)) { hipFree(d_r1); hipFree(d_p1); hipFree(d_AP1); hipFree(d_anT); } } } /****************** * END ******************/ } /*************************** * Explicit instantiations ***************************/ void Solve(const MeshMatrix<Scalar>& A) { applyImplicitBCs(A); SolveT(A); } void Solve(const MeshMatrix<Vector>& A) { applyImplicitBCs(A); SolveT(A); } void Solve(const MeshMatrix<STensor>& A) { applyImplicitBCs(A); SolveT(A); } void Solve(const MeshMatrix<Tensor>& A) { applyImplicitBCs(A); SolveT(A); } /* ******************** * End * ********************/
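/***************************************************************************
 * Illustrative sketch (not part of the file above): CSRMatrix stores the
 * diagonal entry first in every row, and cudaMul/cudaJacobi rely on that --
 * the product is evaluated as y_i = an[diag]*x_i - sum of an[offdiag]*x_j,
 * the usual finite-volume ap/an sign convention. The tiny host example below
 * (hypothetical data) lays out a 3x3 operator in that format and applies the
 * same formula as cudaMul, so the layout can be verified on the CPU.
 ***************************************************************************/
#include <cstdio>

int main() {
    /* Row pointers / columns / coefficients for a 3-point stencil:
     * y_0 = 4*x_0 - x_1,  y_1 = 4*x_1 - x_0 - x_2,  y_2 = 4*x_2 - x_1.
     * The diagonal coefficient sits first in each row, as in CSRMatrix. */
    const int rows[4] = {0, 2, 5, 7};
    const int cols[7] = {0, 1,   1, 0, 2,   2, 1};
    const double an[7] = {4, 1,   4, 1, 1,   4, 1};
    const double x[3] = {1, 2, 3};
    double y[3];
    for (int i = 0; i < 3; i++) {
        int s = rows[i], e = rows[i + 1];
        double r = an[s] * x[cols[s]];          // diagonal contribution
        for (int j = s + 1; j < e; j++)
            r -= an[j] * x[cols[j]];            // off-diagonals are subtracted
        y[i] = r;
    }
    printf("y = %g %g %g\n", y[0], y[1], y[2]); // expect 2 4 10
    return 0;
}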
63e7984007f09490df9835896ab042e8908c8034.cu
#include <cuda.h> #include "solve.h" /*number of threads in a block*/ static const Int nThreads = 128; /*Matrix vector multiply*/ template <class T> __global__ void cudaMul(const Int* const rows, const Int* const cols, const Scalar* const an, const Int N, const T* const x, T* y ) { Int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { const Int start = rows[i]; const Int end = rows[i + 1]; T res = an[start] * x[cols[start]]; for (Int j = start + 1; j < end; j++) res -= an[j] * x[cols[j]]; y[i] = res; } } /*jacobi solver*/ template<class T> __global__ void cudaJacobi(const Int* const rows, const Int* const cols, const Scalar* const an, const T* const cF, T* const cF1, const T* const Su, T* r, const Int N, Scalar omega ) { Int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { const Int start = rows[i]; const Int end = rows[i + 1]; T res = Su[i], val = cF[i]; for (Int j = start + 1; j < end; j++) res += an[j] * cF[cols[j]]; res /= an[start]; r[i] = -val; val *= (1 - omega); val += res * (omega); r[i] += val; cF1[i] = val; } } /*Taxpy*/ template<class T,class T1> __global__ void cudaTaxpy(const Int N, const T1 alpha, const T* const x, const T* const y, T* const z ) { Int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { T temp; temp = x[i]; temp *= alpha; temp += y[i]; z[i] = temp; } } /*Txmy*/ template<class T,class T1> __global__ void cudaTxmy(const Int N, const T* const x, const T1* const y, T* const z ) { Int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { T temp; temp = x[i]; temp *= y[i]; z[i] = temp; } } /*Tdot*/ template <class T> __global__ void Tdot(const T* const a, const T* const b, T* const c, const Int N ) { __shared__ T cache[nThreads]; Int tid = threadIdx.x + blockIdx.x * blockDim.x; Int cacheIndex = threadIdx.x; T temp = T(0),val; while (tid < N) { val = a[tid]; val *= b[tid]; temp += val; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); Int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } template<class T> __host__ T cudaTdot(T* x, T* y, T* d_sum, T* sum, const Int nBlocks32, const Int N ) { Tdot <<< nBlocks32, nThreads >>> (x,y,d_sum,N); cudaMemcpy(sum,d_sum,nBlocks32 * sizeof(T),cudaMemcpyDeviceToHost); T c = T(0); for (Int i = 0; i < nBlocks32; i++) c += sum[i]; return c; } /* ************************************** * CSR - compressed sparse row format * * Used for on GPU computation * * Propably for AMG too * **************************************/ template <class T> class CSRMatrix { public: std::vector<Int> rows; std::vector<Int> cols; std::vector<Scalar> an; std::vector<Scalar> anT; std::vector<T> cF; std::vector<T> Su; public: template <class T1> CSRMatrix(const MeshMatrix<T1>& A) { using namespace Mesh; using namespace DG; const Int N = A.ap.size(); const Int NN = A.ap.size() + A.an[0].size() + A.an[1].size() + (NPMAT ? 
(A.adg.size() - A.ap.size()) : 0); register Int i,ii,f; /*resize*/ cF.resize(N); Su.resize(N); rows.reserve(N + 1); cols.reserve(NN); an.reserve(NN); anT.reserve(NN); /*source term*/ for(i = 0;i < N;i++) { Su[i] = A.Su[i]; cF[i] = (*A.cF)[i]; } /*fill matrix in CSR format. Diagonal element is always at the start of a row */ Int cn = 0; for(ii = 0;ii < gCells.size();ii++) { Cell& c = gCells[ii]; for(Int j = 0;j < NP;j++) { Int i = ii * NP + j; rows.push_back(cn); an.push_back(A.ap[i]); anT.push_back(A.ap[i]); cols.push_back(i); cn++; forEach(c,k) { f = c[k]; if(i == gFO[f]) { an.push_back(A.an[1][f]); anT.push_back(A.an[0][f]); cols.push_back(gFN[f]); cn++; } else { an.push_back(A.an[0][f]); anT.push_back(A.an[1][f]); cols.push_back(gFO[f]); cn++; } } if(NPMAT) { for(Int k = 0;k < NP;k++) { if(k == j) continue; an.push_back(A.adg[ii * NPMAT + j * NP + k]); anT.push_back(A.adg[ii * NPMAT + k * NP + j]); cols.push_back(ii * NP + k); /*column of the (j,k) DG block entry (assumed)*/ cn++; } } } } /*push extra row*/ rows.push_back(cn); } /*IO*/ friend std::ostream& operator << (std::ostream& os, const CSRMatrix& p) { os << p.rows << std::endl; os << p.cols << std::endl; os << p.an << std::endl; os << p.Su << std::endl; return os; } friend std::istream& operator >> (std::istream& is, CSRMatrix& p) { is >> p.rows; is >> p.cols; is >> p.an; is >> p.Su; return is; } /*end*/ }; /*********************************************** * Template class to solve equations on GPU * Solver must do many iterations to compensate * for the latency caused by copying matrix * from host to device. ***********************************************/ template<class T> __host__ void SolveT(const MeshMatrix<T>& M) { const Int N = Mesh::gBCSfield; const Int Nall = M.ap.size(); const Int nBlocks = (N + nThreads - 1) / nThreads; const Int nBlocks32 = ((nBlocks > 32) ?
32 : nBlocks); //info if(M.flags & M.SYMMETRIC) MP::printH("Symmetric : "); else MP::printH("Asymmetric : "); if(Controls::Solver == Controls::SOR) MP::print("SOR :"); else MP::print("PCG :"); /******************************* * variables on host & device *******************************/ Int* d_rows; Int* d_cols; Scalar* d_an; Scalar* d_anT; Scalar* d_pC; T* d_cF; T* d_Su; //PCG T* d_r,*d_r1; T* d_p,*d_p1,*d_AP,*d_AP1; T alpha,beta,o_rr,oo_rr; T local_res[2]; //reduction T* sum,*d_sum; /********************************* * allocate memory on device ********************************/ { CSRMatrix<T> A(M); cudaMalloc((void**) &d_rows,A.rows.size() * sizeof(Int)); cudaMalloc((void**) &d_cols,A.cols.size() * sizeof(Int)); cudaMalloc((void**) &d_an, A.an.size() * sizeof(Scalar)); cudaMalloc((void**) &d_cF, Nall * sizeof(T)); cudaMalloc((void**) &d_Su, Nall * sizeof(T)); cudaMemcpy(d_rows ,&A.rows[0] ,A.rows.size() * sizeof(Int), cudaMemcpyHostToDevice); cudaMemcpy(d_cols ,&A.cols[0] ,A.cols.size() * sizeof(Int), cudaMemcpyHostToDevice); cudaMemcpy(d_an ,&A.an[0] ,A.an.size() * sizeof(Scalar), cudaMemcpyHostToDevice); cudaMemcpy(d_cF ,&A.cF[0] ,Nall * sizeof(T), cudaMemcpyHostToDevice); cudaMemcpy(d_Su ,&A.Su[0] ,Nall * sizeof(T), cudaMemcpyHostToDevice); cudaMalloc((void**) &d_r, Nall * sizeof(T)); cudaMalloc((void**) &d_sum, nBlocks32 * sizeof(T)); sum = (T*) malloc(nBlocks32 * sizeof(T)); if(Controls::Solver == Controls::SOR) { cudaMalloc((void**) &d_AP,Nall * sizeof(T)); cudaMemcpy( d_AP,d_cF,Nall * sizeof(T),cudaMemcpyDeviceToDevice); } else if(Controls::Solver == Controls::PCG) { cudaMalloc((void**) &d_p, Nall * sizeof(T)); cudaMalloc((void**) &d_AP, Nall * sizeof(T)); { ScalarCellField pC = 1./M.ap; cudaMalloc((void**) &d_pC,N * sizeof(Scalar)); cudaMemcpy(d_pC,&pC[0],N * sizeof(Scalar),cudaMemcpyHostToDevice); } if(!(M.flags & M.SYMMETRIC)) { cudaMalloc((void**) &d_r1, Nall * sizeof(T)); cudaMalloc((void**) &d_p1, Nall * sizeof(T)); cudaMalloc((void**) &d_AP1, Nall * sizeof(T)); cudaMalloc((void**) &d_anT,A.anT.size() * sizeof(Scalar)); cudaMemcpy(d_anT,&A.anT[0],A.anT.size() * sizeof(Scalar), cudaMemcpyHostToDevice); } } } /*CG*/ if(Controls::Solver == Controls::PCG) { cudaMemset(d_r,0,Nall * sizeof(T)); cudaMemset(d_p,0,Nall * sizeof(T)); cudaMul <<< nBlocks, nThreads >>> (d_rows,d_cols,d_an,N,d_cF,d_AP); cudaTaxpy <<< nBlocks, nThreads >>> (N,Scalar(-1),d_AP,d_Su,d_r); cudaTxmy <<< nBlocks, nThreads >>> (N,d_r,d_pC,d_p); o_rr = cudaTdot(d_r,d_p,d_sum,sum,nBlocks32,N); } /*BiCG*/ if(!(M.flags & M.SYMMETRIC) && (Controls::Solver == Controls::PCG)) { cudaMemcpy(d_r1,d_r,Nall * sizeof(T), cudaMemcpyDeviceToDevice); cudaMemcpy(d_p1,d_p,Nall * sizeof(T), cudaMemcpyDeviceToDevice); } //iterate until convergence Scalar res = 0; Int iterations = 0; /* ************************** * Iterative solvers * *************************/ while(iterations < Controls::max_iterations) { /*counter*/ iterations++; /*select solver*/ if(Controls::Solver == Controls::SOR) { iterations++; cudaJacobi <<< nBlocks, nThreads >>> (d_rows,d_cols,d_an,d_cF,d_AP,d_Su,d_r,N,Controls::SOR_omega); cudaJacobi <<< nBlocks, nThreads >>> (d_rows,d_cols,d_an,d_AP,d_cF,d_Su,d_r,N,Controls::SOR_omega); } else if(M.flags & M.SYMMETRIC) { /*conjugate gradient : from wiki*/ cudaMul <<< nBlocks, nThreads >>> (d_rows,d_cols,d_an,N,d_p,d_AP); oo_rr = cudaTdot(d_p,d_AP,d_sum,sum,nBlocks32,N); alpha = sdiv(o_rr , oo_rr); cudaTaxpy <<< nBlocks, nThreads >>> (N,alpha,d_p,d_cF,d_cF); cudaTaxpy <<< nBlocks, nThreads >>> (N,-alpha,d_AP,d_r,d_r); 
oo_rr = o_rr; cudaTxmy <<< nBlocks, nThreads >>> (N,d_r,d_pC,d_AP); o_rr = cudaTdot(d_r,d_AP,d_sum,sum,nBlocks32,N); beta = sdiv(o_rr , oo_rr); cudaTaxpy <<< nBlocks, nThreads >>> (N,beta,d_p,d_AP,d_p); /*end*/ } else { /* biconjugate gradient : from wiki */ cudaMul <<< nBlocks, nThreads >>> (d_rows,d_cols,d_an,N,d_p,d_AP); cudaMul <<< nBlocks, nThreads >>> (d_rows,d_cols,d_anT,N,d_p1,d_AP1); oo_rr = cudaTdot(d_p1,d_AP,d_sum,sum,nBlocks32,N); alpha = sdiv(o_rr , oo_rr); cudaTaxpy <<< nBlocks, nThreads >>> (N,alpha,d_p,d_cF,d_cF); cudaTaxpy <<< nBlocks, nThreads >>> (N,-alpha,d_AP,d_r,d_r); cudaTaxpy <<< nBlocks, nThreads >>> (N,-alpha,d_AP1,d_r1,d_r1); oo_rr = o_rr; cudaTxmy <<< nBlocks, nThreads >>> (N,d_r,d_pC,d_AP); cudaTxmy <<< nBlocks, nThreads >>> (N,d_r1,d_pC,d_AP1); o_rr = cudaTdot(d_r1,d_AP,d_sum,sum,nBlocks32,N); beta = sdiv(o_rr , oo_rr); cudaTaxpy <<< nBlocks, nThreads >>> (N,beta,d_p,d_AP,d_p); cudaTaxpy <<< nBlocks, nThreads >>> (N,beta,d_p1,d_AP1,d_p1); } /* ********************************************* * calculate norm of residual & check convergence * **********************************************/ local_res[0] = cudaTdot(d_r,d_r,d_sum,sum,nBlocks32,N); local_res[1] = cudaTdot(d_cF,d_cF,d_sum,sum,nBlocks32,N); res = sqrt(mag(local_res[0]) / mag(local_res[1])); /*check convergence*/ if(res <= Controls::tolerance) break; } /***************************** * Copy result back to cpu *****************************/ //copy result cudaMemcpy(&((*M.cF)[0]), d_cF, N * sizeof(T), cudaMemcpyDeviceToHost); //update boundary conditions updateExplicitBCs(*M.cF); //info MP::print("Iterations %d Residue: %.5e\n",iterations,res); /********************************* * free device memory ********************************/ { cudaFree(d_rows); cudaFree(d_cols); cudaFree(d_an); cudaFree(d_cF); cudaFree(d_Su); cudaFree(d_r); cudaFree(d_sum); free(sum); if(Controls::Solver == Controls::SOR) { cudaFree(d_AP); } else if(Controls::Solver == Controls::PCG) { cudaFree(d_p); cudaFree(d_AP); cudaFree(d_pC); if(!(M.flags & M.SYMMETRIC)) { cudaFree(d_r1); cudaFree(d_p1); cudaFree(d_AP1); cudaFree(d_anT); } } } /****************** * END ******************/ } /*************************** * Explicit instantiations ***************************/ void Solve(const MeshMatrix<Scalar>& A) { applyImplicitBCs(A); SolveT(A); } void Solve(const MeshMatrix<Vector>& A) { applyImplicitBCs(A); SolveT(A); } void Solve(const MeshMatrix<STensor>& A) { applyImplicitBCs(A); SolveT(A); } void Solve(const MeshMatrix<Tensor>& A) { applyImplicitBCs(A); SolveT(A); } /* ******************** * End * ********************/
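/* The solver above depends on helper kernels (cudaMul, cudaJacobi, cudaTaxpy,
 * cudaTxmy) and the reduction cudaTdot, which are defined elsewhere and not
 * shown in this excerpt. The following is only a minimal sketch of what a CSR
 * matrix-vector product with the call shape
 * cudaMul<<<nBlocks,nThreads>>>(rows,cols,an,N,x,y) could look like, assuming
 * one thread per row and the Int/Scalar typedefs used above. The kernel name,
 * the T(0) zero value and the plain y = A*x sign convention are illustrative
 * assumptions, not the project's actual implementation (the real cudaMul must
 * follow whatever sign convention MeshMatrix uses for its off-diagonal
 * coefficients). */
template <class T>
__global__ void cudaMulSketch(const Int* rows, const Int* cols,
                              const Scalar* an, const Int N,
                              const T* x, T* y) {
    const Int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N) return;
    T sum = T(0);
    /* row i spans [rows[i], rows[i+1]); the diagonal entry is stored first */
    for (Int j = rows[i]; j < rows[i + 1]; j++)
        sum += an[j] * x[cols[j]];
    y[i] = sum;
}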
e2fc6e917cc1b074a1f4f5cc8bc3f3d6f924bf38.hip
// !!! This is a file automatically generated by hipify!!! // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2015, September 7 - October 6 // ### // ### // ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas // ### // ### // ### // ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED // ### // ### #include "aux.h" #include <iostream> #include <cmath> using namespace std; #define _USE_MATH_DEFINES // uncomment to use the camera //#define CAMERA int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate hipDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // standard deviation of the Gaussian float sigma = 1; getParam("sigma", sigma, argc, argv); cout << "sigma: " << sigma << endl; sigma = abs(sigma); // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h, w, mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // Kernel matrix int r = (int)ceil(3 * sigma); int w_kernel = 2 * r + 1; int h_kernel = 2 * r + 1; int w_mid = r + 1; int h_mid = r + 1; cv::Mat mKernel = cv::Mat::zeros(h_kernel, w_kernel, CV_32FC1); // Normalize the kernel so that it sums up to 1 float val = 0; for (int i = 0; i < w_kernel; i++) { for (int j = 0; j < h_kernel; j++) { val = 1.0 / (2.0 * M_PI * sigma * sigma) * exp(-(pow(i - w_mid, 2) + pow(j - h_mid, 2)) / (2.0 * sigma * sigma)); mKernel.at<float>(i, j) = val; } } mKernel /= cv::sum(mKernel)[0]; // Create kernel for visualization, that has max value of 1 double minV, maxV; cv::Point minL, maxL; cv::minMaxLoc(mKernel, &minV, &maxV, &minL, &maxL); cv::Mat visKernel = mKernel / maxV; // Display the visualization kernel showImage("Kernel", visKernel, 100 + w + 40, 100 + h + 40); // show at position (x_from_left=100,y_from_above=100) // cv::waitKey(0); // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgIn = new float[(size_t)w*h*nc]; // allocate the linearized kernel array float *imgKern = new float[w_kernel * h_kernel]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); convert_mat_to_layered (imgKern, mKernel); Timer timer; timer.start(); for (int c = 0; c < nc; c++) { for (int x = 0; x < w; x++) { for (int y = 0; y < h; y++) { float accumulated = 0; for (int a = -r; a <= r; a++) { for (int b = -r; b <= r; b++) { int x_act = min(max(x + a, 0), w - 1); int y_act = min(max(y + b, 0), h - 1); accumulated += imgIn[x_act + y_act * w + c * w * h] * imgKern[(r + a) + (r + b) * w_kernel]; } } imgOut[x + y * w + c * w * h] = accumulated; } } } timer.end(); float t = timer.get(); // elapsed time in seconds cout << "time: " << t*1000 << " ms" << endl; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+w+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
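/* convert_mat_to_layered() and convert_layered_to_mat() come from the course's
 * aux.h and are not shown in this excerpt. As a rough sketch of the conversion
 * described in the comment above (interleaved rgb rgb rgb ... to layered
 * rrr... ggg... bbb...), a hypothetical stand-in could look like the function
 * below; its name and signature are assumptions, not the aux.h API. */
void interleaved_to_layered_sketch(float* layered, const float* interleaved,
                                   int w, int h, int nc) {
    // The layered index x + y*w + c*w*h matches the indexing used by the
    // convolution loops above; the interleaved source stores all channels of
    // one pixel contiguously, as OpenCV does after convertTo(..., CV_32F).
    for (int c = 0; c < nc; c++)
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
                layered[x + y * w + c * w * h] = interleaved[(y * w + x) * nc + c];
}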
e2fc6e917cc1b074a1f4f5cc8bc3f3d6f924bf38.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2015, September 7 - October 6 // ### // ### // ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas // ### // ### // ### // ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED // ### // ### #include "aux.h" #include <iostream> #include <cmath> using namespace std; #define _USE_MATH_DEFINES // uncomment to use the camera //#define CAMERA int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate cudaDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // standard deviation of the Gaussian float sigma = 1; getParam("sigma", sigma, argc, argv); cout << "sigma: " << sigma << endl; sigma = abs(sigma); // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h, w, mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // Kernel matrix int r = (int)ceil(3 * sigma); int w_kernel = 2 * r + 1; int h_kernel = 2 * r + 1; int w_mid = r + 1; int h_mid = r + 1; cv::Mat mKernel = cv::Mat::zeros(h_kernel, w_kernel, CV_32FC1); // Normalize the kernel so that it sums up to 1 float val = 0; for (int i = 0; i < w_kernel; i++) { for (int j = 0; j < h_kernel; j++) { val = 1.0 / (2.0 * M_PI * sigma * sigma) * exp(-(pow(i - w_mid, 2) + pow(j - h_mid, 2)) / (2.0 * sigma * sigma)); mKernel.at<float>(i, j) = val; } } mKernel /= cv::sum(mKernel)[0]; // Create kernel for visualization, that has max value of 1 double minV, maxV; cv::Point minL, maxL; cv::minMaxLoc(mKernel, &minV, &maxV, &minL, &maxL); cv::Mat visKernel = mKernel / maxV; // Display the visualization kernel showImage("Kernel", visKernel, 100 + w + 40, 100 + h + 40); // show at position (x_from_left=100,y_from_above=100) // cv::waitKey(0); // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array float *imgIn = new float[(size_t)w*h*nc]; // allocate the linearized kernel array float *imgKern = new float[w_kernel * h_kernel]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); convert_mat_to_layered (imgKern, mKernel); Timer timer; timer.start(); for (int c = 0; c < nc; c++) { for (int x = 0; x < w; x++) { for (int y = 0; y < h; y++) { float accumulated = 0; for (int a = -r; a <= r; a++) { for (int b = -r; b <= r; b++) { int x_act = min(max(x + a, 0), w - 1); int y_act = min(max(y + b, 0), h - 1); accumulated += imgIn[x_act + y_act * w + c * w * h] * imgKern[(r + a) + (r + b) * w_kernel]; } } imgOut[x + y * w + c * w * h] = accumulated; } } } timer.end(); float t = timer.get(); // elapsed time in seconds cout << "time: " << t*1000 << " ms" << endl; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+w+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
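/* The blur above is computed on the CPU in the triple loop over channels and
 * pixels. Below is a minimal sketch of the same clamp-to-edge convolution
 * expressed as a CUDA kernel over the layered layout, purely for illustration:
 * the kernel name, the one-thread-per-pixel-per-channel mapping and the d_*
 * device pointers are assumptions and not part of the original exercise code. */
__global__ void convolution_sketch(const float* d_imgIn, const float* d_kern,
                                   float* d_imgOut, int w, int h, int nc, int r) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.z;                        // one z-slice of the grid per channel
    if (x >= w || y >= h || c >= nc) return;
    int w_kernel = 2 * r + 1;
    float accumulated = 0.f;
    for (int a = -r; a <= r; a++) {
        for (int b = -r; b <= r; b++) {
            int x_act = min(max(x + a, 0), w - 1);   // clamp to the image border
            int y_act = min(max(y + b, 0), h - 1);
            accumulated += d_imgIn[x_act + y_act * w + c * w * h]
                         * d_kern[(r + a) + (r + b) * w_kernel];
        }
    }
    d_imgOut[x + y * w + c * w * h] = accumulated;
}
/* A possible launch, assuming the image and kernel were first copied into
 * d_imgIn and d_kern with cudaMemcpy:
 * dim3 block(32, 8, 1);
 * dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, nc);
 * convolution_sketch<<<grid, block>>>(d_imgIn, d_kern, d_imgOut, w, h, nc, r);
 */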
c3e45f37e9e7291f5b3fb6754396d47768bab3fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../include/cudaconv2.cuh" /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread. * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise * targets: (numColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * Number of filters must be divisible by 16. * Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. */ template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_color(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[numColors*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int blockCaseIdx = blockIdx.x * 16*imgsPerThread; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const int numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeX * imgSizeY; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + loadY * numImages * numModules + loadX; filters += threadIdx.x; targets += pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[numColors][imgsPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 
0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f]; #pragma unroll for (int c = 0; c < numColors; c++) { shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread. 
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread) * blockIdx.x.y = 1..numImgColors/colorsPerThread * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * numImageColors/numGroups must be divisible by colorsPerThread. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are 4-16 color channels. */ template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,16*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const uint numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX; filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x; targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 
0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multipply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: B_YxB_X. 
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image. * * threadIdx.x determines case. * threadIdx.y determines color. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by filterCache. * * B_X * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by 32. * filterCache must be divisible by B_X*B_Y/32 * B_X*B_Y must be divisible by filterCache * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads filterCache weights at a time, so those aren't fully coalesced (depending on size of filterCache). * * To be used when there are >= 16 color channels. */ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCache, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCache + 1]; __shared__ float shHidActs[filterCache][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32; const int filtersLoadY = tidx / filterCache, filtersLoadX = tidx % filterCache; const int numModules = numModulesY * numModulesX; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - 
paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += filterCache) { // multiply with filterCache filters at a time const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * B_X; i += 32) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { #pragma unroll for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCache) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCache) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * (filterCache + 1)] = fLoad[i * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int w = 0; w < filterCache; w++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * Block size: B_YxB_X. * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image. * * threadIdx.x determines case. * threadIdx.y determines color. 
* * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by filterCacheF. * * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by filterCacheF * filterCacheF must be divisible by filterCacheH * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads filterCacheF weights at a time, so those aren't fully coalesced (depending on size of filterCacheF). * * To be used when there are >= 16 color channels. */ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor_kepler(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = threadIdx.y, hidActLoadX = threadIdx.x; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; //const bool noFLoop = filterCacheF == filterCacheH; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = fLoad[i * filterPixels * numFilters]; } } //#pragma unroll for (int fh = f; fh < f + filterCacheF; fh += filterCacheH) { //conv_img_acts_manycolor_dummy_fhLoop<B_Y, B_X, imgsPerThread, colorsPerThread, filterCacheF, filterCacheH, checkCaseBounds>(hidActs, shHidActLoad, shHidActs, shFilters, moduleIdx, numImages, hidActLoadY, hidActLoadX, blockCaseIdx, numModules, f, fh, prod); const float* hLoad = &hidActs[(moduleIdx + fh * numModules) * numImages]; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || hidActLoadY + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread*B_X; i += B_X) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } else { shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } } __syncthreads(); // Do some actual computation // Using these variables causes register usage to go from 161 --> 123. // But nonetheless, the high-register version is faster. 
//const float* shF = &shFilters[threadIdx.y][fh-f]; //const float* const shF2 = &shFilters[threadIdx.y][fh]; //const float* shH = &shHidActs[0][threadIdx.x]; #pragma unroll for (int w = 0; w < filterCacheH; w++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][fh-f + w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * New Titan-optimized stuff. */ __device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX, const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; moduleIdx = my * numModulesX + mx; // out const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; // out } #define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \ for (int i = 0; i < imgsPerThread; i++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ /* * Same loop as above but inverted. 
*/ #define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ _Pragma("unroll") \ for (int i = 0; i < imgsPerThread; i++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ #define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \ for (int w = 0; w < filterCacheH; w++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ #define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters]; #define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters); #define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \ hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \ } #define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \ hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \ } template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void __launch_bounds__(256, 2) // 256 threads per block, 2 blocks per multiprocessor // These launch bounds ensure 25% occupancy (128 registers used) // as oppposed to 13% (130 registers) achieved by defaults. conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int myCaseIdx = blockCaseIdx + threadIdx.x; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; const int hidActsOffset = (blockFilterIdx + threadIdx.y) 
* numImages * numModules + myCaseIdx; const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread]; //const bool noFLoop = filterCacheF == filterCacheH; /* * Initial preload */ float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4] float wPreload[filterCacheF*colorsPerThread/B_X]; // [8] int moduleIdx, pxIdxInFilter; conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter); // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0] // : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0]; int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0 : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters); #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters); } } // const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages]; int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X); } } } } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext; const bool lastModule = my == endY - 1 && mx == endX - 1; if (!lastModule) { mxNext = mx + 1 == endX ? 
startX : mx + 1; myNext = my + (mx + 1 == endX); } conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext); for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)]; } } filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF); if (f == numFiltersPerGroup - filterCacheF) { filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters); } #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } __syncthreads(); hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages; #pragma unroll for (int z = 0; z < 4; ++z) { IA_PRELOAD_LOOP(z,0); IA_PRELOAD_W_TX(z); } #pragma unroll for (int z = 4; z < 12; ++z) { IA_PRELOAD_LOOP(z,0); IA_PRELOAD_H_TX((z-4)/4,z%4); } #pragma unroll for (int z = 12; z < 16; ++z) { IA_PRELOAD_LOOP(z,0); } __syncthreads(); #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } __syncthreads(); hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages; if (f == numFiltersPerGroup - filterCacheF) { hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages; } #pragma unroll for (int z = 0; z < 4; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); IA_PRELOAD_W_TX(z+4); } #pragma unroll for (int z = 4; z < 12; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); IA_PRELOAD_H_TX((z-4)/4, z%4); } #pragma unroll for (int z = 12; z < 16; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void //__launch_bounds__(128, 3) // 128 threads per block, 3 blocks per multiprocessor 
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int myCaseIdx = blockCaseIdx + threadIdx.x; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread]; //const bool noFLoop = filterCacheF == filterCacheH; /* * Initial preload */ float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4] float wPreload[filterCacheF*colorsPerThread/B_X]; // [6] int moduleIdx, pxIdxInFilter; conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter); // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0] // : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0]; int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters); #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters); } } // const float* hLoad = &hidActs[moduleIdx * numImages]; int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X); } } } } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext; const bool lastModule = my == endY - 1 && mx == endX - 1; if (!lastModule) { mxNext = mx + 1 == endX ? startX : mx + 1; myNext = my + (mx + 1 == endX); } conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext); for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)]; } } filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF); if (f == numFiltersPerGroup - filterCacheF) { filtersLoadOffset = filtersOffset + (conv ? 
                        pxIdxInFilterNext * numFilters
                        : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }

                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }

                __syncthreads();

                // It seems that there is no point explicitly interleaving loads
                // and computations because the scheduler does that anyway.

                IA_PRELOAD_LOOP2(0,0);
                IA_PRELOAD_LOOP2(1,0);
                IA_PRELOAD_LOOP2(2,0);
                IA_PRELOAD_LOOP2(3,0);
                IA_PRELOAD_LOOP2(4,0);
                IA_PRELOAD_LOOP2(5,0);
                IA_PRELOAD_LOOP2(6,0);
                IA_PRELOAD_LOOP2(7,0);
                IA_PRELOAD_LOOP2(8,0);
                IA_PRELOAD_LOOP2(9,0);
                IA_PRELOAD_LOOP2(10,0);
                IA_PRELOAD_LOOP2(11,0);
                IA_PRELOAD_LOOP2(12,0);
                IA_PRELOAD_LOOP2(13,0);
                IA_PRELOAD_LOOP2(14,0);
                IA_PRELOAD_LOOP2(15,0);

                IA_PRELOAD_W_TX(0);
                IA_PRELOAD_W_TX(1);
                IA_PRELOAD_W_TX(2);
                IA_PRELOAD_W_TX(3);
                IA_PRELOAD_W_TX(4);
                IA_PRELOAD_W_TX(5);

                IA_PRELOAD_H_TX(0,0);
                IA_PRELOAD_H_TX(0,1);
                IA_PRELOAD_H_TX(0,2);
                IA_PRELOAD_H_TX(0,3);
                IA_PRELOAD_H_TX(1,0);
                IA_PRELOAD_H_TX(1,1);
                IA_PRELOAD_H_TX(1,2);
                IA_PRELOAD_H_TX(1,3);
                IA_PRELOAD_H_TX(2,0);
                IA_PRELOAD_H_TX(2,1);
                IA_PRELOAD_H_TX(2,2);
                IA_PRELOAD_H_TX(2,3);
                IA_PRELOAD_H_TX(3,0);
                IA_PRELOAD_H_TX(3,1);
                IA_PRELOAD_H_TX(3,2);
                IA_PRELOAD_H_TX(3,3);

                __syncthreads();
            }
        }
    }

    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * hidActs:     (numFilters, numModules, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)               if conv
 *              (numModules, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:     (overSample, numImgColors, imgPixels, numImages)
 *
 * Note: all of these convolution routines are optimized for the case when
 * the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
 * to make them work fast.
 */
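/*
 * Worked example of the dispatch below (hypothetical values, for illustration only):
 * for a convolutional call with numImgColors = 64, numGroups = 1, numImages = 128,
 * numFilters = 64, scaleTargets = 0 and conv = true, we get numFilterColors = 64, so
 * threads = dim3(32, 8), colorsPerThread = 8, imgsPerThread = 4, checkCaseBounds = false,
 * blocks = dim3(DIVUP(128, 32*4) * (64/(8*8)), imgPixels) = dim3(1, imgPixels),
 * and the texture-object kernel
 * conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >
 * is launched. Other shapes fall through to the conv_img_acts_manycolor_kepler,
 * img_acts_mediumcolor or img_acts_color variants selected further down.
 */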
void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
              int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
              int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) {
    int numFilterColors = numImgColors / numGroups;
    int numImages = hidActs.getNumCols();
    int numFilters = filters.getNumCols();
    int numModules = hidActs.getNumRows() / numFilters;
    int filterModuleMult = conv ? 1 : numModules;
    int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
    int filterSize = sqrt(filterPixels);
    int imgPixels = imgSizeY * imgSizeX;
    int numModulesX = numModules / numModulesY;

    assert(numImgColors % numGroups == 0);
    assert(numFilters % (16*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that.
    assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
    assert(numGroups == 1 || numFilterColors % 4 == 0);

    assert(filterPixels == filterSize * filterSize);
    assert(hidActs.getNumRows() == numModules * numFilters);
    assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
    assert(numModules == numModulesY * numModulesX);

    assert(hidActs.isContiguous());
    assert(filters.isContiguous());

    assert(!hidActs.isTrans());
    assert(!filters.isTrans());
    assert(!targets.isTrans());
    // These routines don't handle the case when only part of the image is visited in the convolution
    assert(paddingStart <= 0);
    assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
    assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
    assert(moduleStride <= filterSize);

    assert(targets.isContiguous()); // no stride support here!

    dim3 blocks;
    dim3 threads;
    int colorsPerThread, imgsPerThread;
    if (numFilterColors % 8 == 0) {
        threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4);
        colorsPerThread = numFilterColors % 64 == 0 ? 8 : numFilterColors % 48 == 0 ? 12 : numFilterColors % 32 == 0 ? 8 : numFilterColors % 16 == 0 ? 4 : 2;
        imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
        assert(numFilterColors % (threads.y * colorsPerThread) == 0);
        blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
        // NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!!
    } else if (numFilterColors > 3) {
        // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
        imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
        threads = dim3(16, 16);
        colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
        blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
    } else {
        // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
        imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ?
4 : 2; threads = dim3(16, 16); blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; if (scaleTargets == 0) { // do not scale or use targets matrix targets.resize(numImgColors*imgPixels, numImages); } else { assert(targets.getNumRows() == numImgColors * imgPixels); assert(targets.getNumCols() == numImages); } const bool scale = scaleTargets != 0; hipStream_t stream = NVMatrix::getDefaultStream(); // hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); // hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, // hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, // imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); //return; // printf("conv: %d\n", conv); // printf("scale: %d\n", scale); // printf("checkCaseBounds: %d\n", checkCaseBounds); // printf("numFilterColors: %d\n", numFilterColors); // printf("numImages: %d\n", numImages); // hipStream_t stream = NVMatrix::getDefaultStream(); if (conv == true) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, 
numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { 
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), 
filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, 
hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, true >), dim3(blocks), 
dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, true >, hipFuncCachePreferShared); 
hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, 
false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, 
imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, 
scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >), 
dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, true >), 
dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, 
true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 
16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >, 
hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, true, true >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } } else 
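// conv == false branch below: the dispatch mirrors the conv == true tree above, selecting a
// template instantiation of conv_img_acts_manycolor_kepler / img_acts_mediumcolor /
// img_acts_color from numFilterColors, numFilters and the divisibility of numImages
// (128/64/32/16). The trailing three template booleans appear to track
// <scale, checkCaseBounds, conv>, matching the enclosing runtime conditions.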
if (conv == false) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, 
false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 
stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, 
false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, 
numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if 
(numFilters % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { 
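// checkCaseBounds == true fallback: numImages is not assumed to be a multiple of 16, so the
// "% 1 == 0" guard (always true) selects a bounds-checked instantiation that appears to
// process one image per thread.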
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >), 
dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 
32, 1, 8, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >), dim3(blocks), 
dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { 
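// scale == true instantiations (presumably accumulating into targets via scaleTargets /
// scaleOutput): what appears to be the colors-per-thread template argument steps down with
// numFilterColors (8 -> 4 -> 2 in this region), while the images-per-thread argument follows
// the numImages divisibility checks (128 -> 4, 64 -> 2, 32 and 16 -> 1).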
hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, 
imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), 
filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 
== 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { 
hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, true, false >), dim3(blocks), dim3(threads), 0, stream, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } } getLastCudaError("imgActs: kernel execution failed"); } void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) { _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true); } void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true); } void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) { _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, 
false);
}

void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY,
                  int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) {
    _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
             numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
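/*
 * Editor's note (illustrative sketch, not part of the original sources): the img-acts kernels above all
 * recompute, for a given target-image pixel coordinate, the range of modules (filter applications) whose
 * receptive field covers that pixel, via the startY/endY and startX/endX expressions. The helper below
 * restates that arithmetic on the host for one axis; the function name and its use are ours and exist only
 * to document the formula, assuming the same meaning of paddingStart (<= 0), moduleStride, filterSize and
 * numModules as in _imgActs above. The region-based kernels (img_acts_color / img_acts_mediumcolor) apply
 * the same formula to the top-left corner of a 4x4 pixel region instead of a single pixel.
 */
static inline void imgActsModuleRange(int pixelCoord, int paddingStart, int moduleStride,
                                      int filterSize, int numModules,
                                      int* startModule, int* endModule) {
    // First module whose window [paddingStart + m*moduleStride, +filterSize) still reaches pixelCoord.
    *startModule = pixelCoord - paddingStart < filterSize
                 ? 0
                 : 1 + (pixelCoord - paddingStart - filterSize) / moduleStride;
    // One past the last module whose window starts at or before pixelCoord, clamped to numModules.
    int lastPlusOne = 1 + (pixelCoord - paddingStart) / moduleStride;
    *endModule = numModules < lastPlusOne ? numModules : lastPlusOne;
}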
c3e45f37e9e7291f5b3fb6754396d47768bab3fd.cu
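/*
 * Editor's note (illustrative sketch, not from the original file): the *_tex kernels further down in this
 * file read hidActs and filters through tex1Dfetch<float>() on cudaTextureObject_t handles. The actual
 * binding code lives elsewhere in the codebase and is not shown here; the helper below only sketches the
 * standard CUDA way such a texture object can be created over a plain linear device buffer. The function
 * name is ours, and error handling is omitted.
 */
static cudaTextureObject_t makeLinearFloatTexture(const float* devPtr, size_t numFloats) {
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = const_cast<float*>(devPtr);
    resDesc.res.linear.desc = cudaCreateChannelDesc<float>();
    resDesc.res.linear.sizeInBytes = numFloats * sizeof(float);

    cudaTextureDesc texDesc = {};
    texDesc.readMode = cudaReadModeElementType;   // raw float reads, no normalization or filtering

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);  // release later with cudaDestroyTextureObject(tex)
    return tex;
}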
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../include/cudaconv2.cuh" /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread. * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise * targets: (numColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * Number of filters must be divisible by 16. * Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. */ template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_color(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[numColors*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int blockCaseIdx = blockIdx.x * 16*imgsPerThread; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const int numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeX * imgSizeY; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + loadY * numImages * numModules + loadX; filters += threadIdx.x; targets += pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[numColors][imgsPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 
0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f]; #pragma unroll for (int c = 0; c < numColors; c++) { shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread. 
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread) * blockIdx.x.y = 1..numImgColors/colorsPerThread * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * numImageColors/numGroups must be divisible by colorsPerThread. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are 4-16 color channels. */ template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,16*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const uint numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX; filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x; targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 
0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multipply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: B_YxB_X. 
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image. * * threadIdx.x determines case. * threadIdx.y determines color. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by filterCache. * * B_X * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by 32. * filterCache must be divisible by B_X*B_Y/32 * B_X*B_Y must be divisible by filterCache * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads filterCache weights at a time, so those aren't fully coalesced (depending on size of filterCache). * * To be used when there are >= 16 color channels. */ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCache, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCache + 1]; __shared__ float shHidActs[filterCache][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32; const int filtersLoadY = tidx / filterCache, filtersLoadX = tidx % filterCache; const int numModules = numModulesY * numModulesX; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - 
paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += filterCache) { // multiply with filterCache filters at a time const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * B_X; i += 32) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { #pragma unroll for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCache) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCache) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * (filterCache + 1)] = fLoad[i * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int w = 0; w < filterCache; w++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * Block size: B_YxB_X. * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image. * * threadIdx.x determines case. * threadIdx.y determines color. 
* * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by filterCacheF. * * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by filterCacheF * filterCacheF must be divisible by filterCacheH * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads filterCacheF weights at a time, so those aren't fully coalesced (depending on size of filterCacheF). * * To be used when there are >= 16 color channels. */ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor_kepler(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = threadIdx.y, hidActLoadX = threadIdx.x; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; //const bool noFLoop = filterCacheF == filterCacheH; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = fLoad[i * filterPixels * numFilters]; } } //#pragma unroll for (int fh = f; fh < f + filterCacheF; fh += filterCacheH) { //conv_img_acts_manycolor_dummy_fhLoop<B_Y, B_X, imgsPerThread, colorsPerThread, filterCacheF, filterCacheH, checkCaseBounds>(hidActs, shHidActLoad, shHidActs, shFilters, moduleIdx, numImages, hidActLoadY, hidActLoadX, blockCaseIdx, numModules, f, fh, prod); const float* hLoad = &hidActs[(moduleIdx + fh * numModules) * numImages]; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || hidActLoadY + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread*B_X; i += B_X) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } else { shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } } __syncthreads(); // Do some actual computation // Using these variables causes register usage to go from 161 --> 123. // But nonetheless, the high-register version is faster. 
//const float* shF = &shFilters[threadIdx.y][fh-f]; //const float* const shF2 = &shFilters[threadIdx.y][fh]; //const float* shH = &shHidActs[0][threadIdx.x]; #pragma unroll for (int w = 0; w < filterCacheH; w++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][fh-f + w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * New Titan-optimized stuff. */ __device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX, const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; moduleIdx = my * numModulesX + mx; // out const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; // out } #define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \ for (int i = 0; i < imgsPerThread; i++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ /* * Same loop as above but inverted. 
*/ #define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ _Pragma("unroll") \ for (int i = 0; i < imgsPerThread; i++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ #define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \ for (int w = 0; w < filterCacheH; w++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ #define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters]; #define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters); #define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \ hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \ } #define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \ hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \ } template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void __launch_bounds__(256, 2) // 256 threads per block, 2 blocks per multiprocessor // These launch bounds ensure 25% occupancy (128 registers used) // as oppposed to 13% (130 registers) achieved by defaults. conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int myCaseIdx = blockCaseIdx + threadIdx.x; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; const int hidActsOffset = (blockFilterIdx + 
threadIdx.y) * numImages * numModules + myCaseIdx; const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread]; //const bool noFLoop = filterCacheF == filterCacheH; /* * Initial preload */ float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4] float wPreload[filterCacheF*colorsPerThread/B_X]; // [8] int moduleIdx, pxIdxInFilter; conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter); // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0] // : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0]; int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0 : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters); #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters); } } // const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages]; int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X); } } } } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext; const bool lastModule = my == endY - 1 && mx == endX - 1; if (!lastModule) { mxNext = mx + 1 == endX ? 
startX : mx + 1; myNext = my + (mx + 1 == endX); } conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext); for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)]; } } filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF); if (f == numFiltersPerGroup - filterCacheF) { filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters); } #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } __syncthreads(); hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages; #pragma unroll for (int z = 0; z < 4; ++z) { IA_PRELOAD_LOOP(z,0); IA_PRELOAD_W_TX(z); } #pragma unroll for (int z = 4; z < 12; ++z) { IA_PRELOAD_LOOP(z,0); IA_PRELOAD_H_TX((z-4)/4,z%4); } #pragma unroll for (int z = 12; z < 16; ++z) { IA_PRELOAD_LOOP(z,0); } __syncthreads(); #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } __syncthreads(); hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages; if (f == numFiltersPerGroup - filterCacheF) { hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages; } #pragma unroll for (int z = 0; z < 4; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); IA_PRELOAD_W_TX(z+4); } #pragma unroll for (int z = 4; z < 12; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); IA_PRELOAD_H_TX((z-4)/4, z%4); } #pragma unroll for (int z = 12; z < 16; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void //__launch_bounds__(128, 3) // 128 threads per block, 3 blocks per multiprocessor 
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int myCaseIdx = blockCaseIdx + threadIdx.x; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread]; //const bool noFLoop = filterCacheF == filterCacheH; /* * Initial preload */ float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4] float wPreload[filterCacheF*colorsPerThread/B_X]; // [6] int moduleIdx, pxIdxInFilter; conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter); // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0] // : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0]; int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters); #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters); } } // const float* hLoad = &hidActs[moduleIdx * numImages]; int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X); } } } } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext; const bool lastModule = my == endY - 1 && mx == endX - 1; if (!lastModule) { mxNext = mx + 1 == endX ? startX : mx + 1; myNext = my + (mx + 1 == endX); } conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext); for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)]; } } filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF); if (f == numFiltersPerGroup - filterCacheF) { filtersLoadOffset = filtersOffset + (conv ? 
pxIdxInFilterNext * numFilters
                                                          : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters);
                }
                #pragma unroll
                for (int j = 0; j < filterCacheH; j += B_Y) {
                    if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) {
                        #pragma unroll
                        for (int i = 0; i < imgsPerThread; i++) {
                            // NOTE: bank conflicts here!
                            if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                                shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i];
                            }
                        }
                    }
                }

                hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages;
                if (f == numFiltersPerGroup - filterCacheF) {
                    hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages;
                }

                __syncthreads();

                // It seems that there is no point explicitly interleaving loads
                // and computations because the scheduler does that anyway.

                IA_PRELOAD_LOOP2(0,0);
                IA_PRELOAD_LOOP2(1,0);
                IA_PRELOAD_LOOP2(2,0);
                IA_PRELOAD_LOOP2(3,0);
                IA_PRELOAD_LOOP2(4,0);
                IA_PRELOAD_LOOP2(5,0);
                IA_PRELOAD_LOOP2(6,0);
                IA_PRELOAD_LOOP2(7,0);
                IA_PRELOAD_LOOP2(8,0);
                IA_PRELOAD_LOOP2(9,0);
                IA_PRELOAD_LOOP2(10,0);
                IA_PRELOAD_LOOP2(11,0);
                IA_PRELOAD_LOOP2(12,0);
                IA_PRELOAD_LOOP2(13,0);
                IA_PRELOAD_LOOP2(14,0);
                IA_PRELOAD_LOOP2(15,0);

                IA_PRELOAD_W_TX(0);
                IA_PRELOAD_W_TX(1);
                IA_PRELOAD_W_TX(2);
                IA_PRELOAD_W_TX(3);
                IA_PRELOAD_W_TX(4);
                IA_PRELOAD_W_TX(5);

                IA_PRELOAD_H_TX(0,0);
                IA_PRELOAD_H_TX(0,1);
                IA_PRELOAD_H_TX(0,2);
                IA_PRELOAD_H_TX(0,3);
                IA_PRELOAD_H_TX(1,0);
                IA_PRELOAD_H_TX(1,1);
                IA_PRELOAD_H_TX(1,2);
                IA_PRELOAD_H_TX(1,3);
                IA_PRELOAD_H_TX(2,0);
                IA_PRELOAD_H_TX(2,1);
                IA_PRELOAD_H_TX(2,2);
                IA_PRELOAD_H_TX(2,3);
                IA_PRELOAD_H_TX(3,0);
                IA_PRELOAD_H_TX(3,1);
                IA_PRELOAD_H_TX(3,2);
                IA_PRELOAD_H_TX(3,3);

                __syncthreads();
            }
        }
    }
    if (scale) {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
                }
            }
        }
    } else {
        #pragma unroll
        for (int c = 0; c < colorsPerThread; c++) {
            #pragma unroll
            for (int i = 0; i < imgsPerThread; i++) {
                if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) {
                    targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
                }
            }
        }
    }
}

/*
 * hidActs:     (numFilters, numModules, numImages)
 * filters:     (numFilterColors, filterPixels, numFilters)               if conv
 *              (numModules, numFilterColors, filterPixels, numFilters)   otherwise
 * targets:     (overSample, numImgColors, imgPixels, numImages)
 *
 * Note: all of these convolution routines are optimized for the case when
 * the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
 * to make them work fast.
 */
void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
              int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
              int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) {
    int numFilterColors = numImgColors / numGroups;
    int numImages = hidActs.getNumCols();
    int numFilters = filters.getNumCols();
    int numModules = hidActs.getNumRows() / numFilters;
    int filterModuleMult = conv ?
                                   1 : numModules;
    int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
    int filterSize = sqrt(filterPixels);
    int imgPixels = imgSizeY * imgSizeX;
    int numModulesX = numModules / numModulesY;

    assert(numImgColors % numGroups == 0);
    assert(numFilters % (16*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that.
    assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
    assert(numGroups == 1 || numFilterColors % 4 == 0);

    assert(filterPixels == filterSize * filterSize);
    assert(hidActs.getNumRows() == numModules * numFilters);
    assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
    assert(numModules == numModulesY * numModulesX);

    assert(hidActs.isContiguous());
    assert(filters.isContiguous());

    assert(!hidActs.isTrans());
    assert(!filters.isTrans());
    assert(!targets.isTrans());
    // These routines don't handle the case when only part of the image is visited in the convolution
    assert(paddingStart <= 0);
    assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
    assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
    assert(moduleStride <= filterSize);

    assert(targets.isContiguous()); // no stride support here!

    dim3 blocks;
    dim3 threads;
    int colorsPerThread, imgsPerThread;
    if (numFilterColors % 8 == 0) {
        threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4);
        colorsPerThread = numFilterColors % 64 == 0 ? 8
                        : numFilterColors % 48 == 0 ? 12
                        : numFilterColors % 32 == 0 ? 8
                        : numFilterColors % 16 == 0 ? 4
                        : 2;
        imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
        assert(numFilterColors % (threads.y * colorsPerThread) == 0);
        blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
        // NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!!
    } else if (numFilterColors > 3) {
        // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
        imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
        threads = dim3(16, 16);
        colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
        blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
    } else {
        // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!!
        imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ?
4 : 2; threads = dim3(16, 16); blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; if (scaleTargets == 0) { // do not scale or use targets matrix targets.resize(numImgColors*imgPixels, numImages); } else { assert(targets.getNumRows() == numImgColors * imgPixels); assert(targets.getNumCols() == numImages); } const bool scale = scaleTargets != 0; cudaStream_t stream = NVMatrix::getDefaultStream(); // cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); // conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>( // hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, // imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); //return; // printf("conv: %d\n", conv); // printf("scale: %d\n", scale); // printf("checkCaseBounds: %d\n", checkCaseBounds); // printf("numFilterColors: %d\n", numFilterColors); // printf("numImages: %d\n", numImages); // cudaStream_t stream = NVMatrix::getDefaultStream(); if (conv == true) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } 
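                // Note: the `numFilterColors == 2` branch that follows sits inside the
                // `numFilterColors > 3` case above, so it appears to be unreachable
                // (presumably leftover from whatever generated this dispatch table);
                // the reachable 1-3 color paths are handled in the
                // `numFilterColors <= 3` case further below.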
else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 
8, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >, cudaFuncCachePreferShared); 
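                // checkCaseBounds == true path: numImages is not a multiple of
                // threads.x * imgsPerThread, so (assuming the same template parameter order
                // as the preload kernels above: B_Y, B_X, imgsPerThread, colorsPerThread,
                // filterCacheF, filterCacheH, scale, checkCaseBounds, conv) these
                // instantiations use imgsPerThread == 1 and bounds-check every image index,
                // which is why the `numImages % 1 == 0` guards here are trivially true.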
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, 
moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true ><<<blocks, threads, 0, 
stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0, 
stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true ><<<blocks, threads, 0, 
stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), 
filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), 
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { 
cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, true, true ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } } 
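        // Everything above dispatched the conv == true instantiations; the mirrored tree below
        // repeats the same selection with the last template flag set to false (the locally-connected,
        // unshared-weights variant, going by the flag's name). The branching criteria are unchanged:
        // scale, checkCaseBounds, numFilterColors, numFilters, and the largest of 128/64/32/16 that
        // divides numImages, which picks the per-thread image count template argument (4, 2, or 1).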
else if (conv == false) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, 
stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, 
false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { 
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { 
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 
== 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false ><<<blocks, 
threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { 
if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 4, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getTextureObject(), filters.getTextureObject(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), 
targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } 
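        // Note: this numFilterColors == 2 branch is unreachable (it sits inside the
        // numFilterColors > 3 guard) and looks like leftover from the code generator. Reachable
        // 1-3 color inputs are handled in the numFilterColors <= 3 block below via img_acts_color,
        // whose launches omit numImgColors/numGroups because the color count appears as a template argument.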
else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 
8, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >, cudaFuncCachePreferShared); 
conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 48 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 32 == 0) { if (numFilters % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 16 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors % 8 == 0) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, 
moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors == 1) { if (numFilters % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, true, false ><<<blocks, threads, 0, stream>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } } getLastCudaError("imgActs: kernel execution failed"); } void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) { _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true); } void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true); } void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) { _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, 
moduleStride, numImgColors, numGroups, 0, 1, false); } void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false); }
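// The dispatch code above selects a kernel template specialization at run time (keyed on
// numImages / numFilters / numFilterColors divisibility and checkCaseBounds), pins its cache
// preference with cudaFuncSetCacheConfig, and launches it on the caller's stream. A condensed
// sketch of that pattern follows; the kernel name, the placeholder copy body, and the
// imgsPerThread values are illustrative stand-ins, not entries from the dispatch table above.
#include <cuda_runtime.h>

template <int imgsPerThread>
__global__ void example_dispatch_kernel(const float* in, float* out, int n) {
    int base = (blockIdx.x * blockDim.x + threadIdx.x) * imgsPerThread;
    for (int k = 0; k < imgsPerThread; ++k) {
        int idx = base + k;
        if (idx < n) out[idx] = in[idx];  // placeholder per-image work
    }
}

static void launch_example_dispatch(const float* d_in, float* d_out, int n,
                                    int numImages, cudaStream_t stream) {
    dim3 threads(256);
    if (numImages % 128 == 0) {
        dim3 blocks((n + 4 * 256 - 1) / (4 * 256));
        cudaFuncSetCacheConfig(example_dispatch_kernel<4>, cudaFuncCachePreferShared);
        example_dispatch_kernel<4><<<blocks, threads, 0, stream>>>(d_in, d_out, n);
    } else if (numImages % 64 == 0) {
        dim3 blocks((n + 2 * 256 - 1) / (2 * 256));
        cudaFuncSetCacheConfig(example_dispatch_kernel<2>, cudaFuncCachePreferShared);
        example_dispatch_kernel<2><<<blocks, threads, 0, stream>>>(d_in, d_out, n);
    } else {
        dim3 blocks((n + 255) / 256);
        cudaFuncSetCacheConfig(example_dispatch_kernel<1>, cudaFuncCachePreferShared);
        example_dispatch_kernel<1><<<blocks, threads, 0, stream>>>(d_in, d_out, n);
    }
}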
fa715746dd9313196f754a051861a3967edf5647.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define CHECK(e) { int res = (e); if (res) printf("CUDA ERROR %d\n", res); } #define CHANNEL 3 // Struct for measuring performance struct GpuTimer{ hipEvent_t start; hipEvent_t stop; GpuTimer(){ hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer(){ hipEventDestroy(start); hipEventDestroy(stop); } void Start(){ hipEventRecord(start, 0); } void Stop(){ hipEventRecord(stop, 0); } float Elapsed(){ float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; struct Image { int width; int height; unsigned int bytes; unsigned char *img; unsigned char *dev_img; }; // Reads a color ppm image file and saves the data in the provided Image structure. // The max_col_val is set to the value read from the input file. // This is used later for writing the output image. int readInpImg(const char * fname, Image & source, int & max_col_val) { FILE *src; if (!(src = fopen(fname, "rb"))){ printf("Couldn't open file %s for reading.\n", fname); return 1; } char p, s; fscanf(src, "%c%c\n", &p, &s); if (p != 'P' || s != '6'){ // Is it a valid format? printf("Not a valid PPM file (%c %c)\n", p, s); exit(1); } fscanf(src, "%d %d\n", &source.width, &source.height); fscanf(src, "%d\n", &max_col_val); int pixels = source.width * source.height; source.bytes = pixels * CHANNEL; // CHANNEL = 3 => colored image with r, g, and b channels source.img = (unsigned char *)malloc(source.bytes); if (fread(source.img, sizeof(unsigned char), source.bytes, src) != source.bytes){ printf("Error reading file.\n"); exit(1); } fclose(src); return 0; } // Write a color ppm image into a file. // Image structure represents the image in the memory. 
int writeOutImg(const char * fname, const Image & rotated, const int max_col_val) { FILE *out; if (!(out = fopen(fname, "wb"))){ printf("Couldn't open file for output.\n"); return 1; } fprintf(out, "P6\n%d %d\n%d\n", rotated.width, rotated.height, max_col_val); if (fwrite(rotated.dev_img, sizeof(unsigned char), rotated.bytes, out) != rotated.bytes){ printf("Error writing file.\n"); return 1; } fclose(out); return 0; } // To be launched on CPU void rotate_90_CPU(unsigned char in[], unsigned char out[], int height, int width) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { int index_in = i * width * CHANNEL + j * CHANNEL; int index_out = j * height * CHANNEL + height * CHANNEL - (i + 1) * CHANNEL; out[index_out] = in[index_in]; out[index_out + 1] = in[index_in + 1]; out[index_out + 2] = in[index_in + 2]; } } } // To be launched on a single thread __global__ void rotate_90_serial(unsigned char in[], unsigned char out[], int height, int width) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { int index_in = i * width * CHANNEL + j * CHANNEL; int index_out = j * height * CHANNEL + height * CHANNEL - (i + 1) * CHANNEL; out[index_out] = in[index_in]; out[index_out + 1] = in[index_in + 1]; out[index_out + 2] = in[index_in + 2]; } } } // To be launched with one thread per element __global__ void rotate_90_parallel_per_element(unsigned char in[], unsigned char out[], int height, int width) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int index_in = i * width * CHANNEL + j * CHANNEL; int index_out = j * height * CHANNEL + height * CHANNEL - (i + 1) * CHANNEL; int image_size = height * width * CHANNEL; if(index_in < image_size && index_in >= 0 && index_out < image_size && index_out >= 0){ out[index_out] = in[index_in]; out[index_out + 1] = in[index_in + 1]; out[index_out + 2] = in[index_in + 2]; } } // To be launched with one thread per element. Thread blocks read & write tiles in coalesced fashion. __global__ void rotate_90_parallel_per_element_tiled(unsigned char in[], unsigned char out[], int height, int width, unsigned int tile_size) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * tile_size, in_corner_j = blockIdx.y * tile_size * CHANNEL; int out_corner_i = blockIdx.y * tile_size * CHANNEL, out_corner_j = blockIdx.x * tile_size; int x = threadIdx.x, y = threadIdx.y; int index_tile = x * (tile_size)*CHANNEL + y * CHANNEL; int index_in = in_corner_i * width * CHANNEL + x * width *CHANNEL + in_corner_j + y * CHANNEL; int image_size = height * width * CHANNEL; int total_tile_size = tile_size * tile_size * CHANNEL; extern __shared__ unsigned char tile[]; // Coalesced read from global mem, rotated write into shared mem: if(index_tile < total_tile_size && index_tile >= 0 && index_in < image_size && index_in >= 0){ tile[index_tile] = in[index_in]; tile[index_tile+1] = in[index_in+1]; tile[index_tile+2] = in[index_in+2]; } __syncthreads(); int index_out = out_corner_i*height + y * height * CHANNEL + height * CHANNEL - (x+1) * CHANNEL - out_corner_j * CHANNEL; // Read from shared mem, coalesced write to global mem: if(index_tile < total_tile_size && index_tile >= 0 && index_out < image_size && index_out >= 0 && index_in < image_size && index_in >= 0){ out[index_out] = tile[index_tile]; out[index_out + 1] = tile[index_tile + 1]; out[index_out + 2] = tile[index_tile + 2]; } } // To be launched with one thread per element. 
Thread blocks read & write tiles in coalesced fashion. // Shared memory array padded to avoid bank conflicts. __global__ void rotate_90_parallel_per_element_tiled_padded(unsigned char in[], unsigned char out[], int height, int width, unsigned int tile_size) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * tile_size, in_corner_j = blockIdx.y * tile_size * CHANNEL; int out_corner_i = blockIdx.y * tile_size * CHANNEL, out_corner_j = blockIdx.x * tile_size; int x = threadIdx.x, y = threadIdx.y; int image_size = height * width * CHANNEL; int total_tile_size = tile_size * (tile_size + 1) * CHANNEL ; int index_tile = x * (tile_size + 1)*CHANNEL + y * CHANNEL; int index_in = in_corner_i * width * CHANNEL + x * width *CHANNEL + in_corner_j + y * CHANNEL; extern __shared__ unsigned char tile[]; // coalesced read from global mem, rotated write into shared mem: if(index_tile < total_tile_size && index_tile >= 0 && index_in < image_size){ tile[index_tile] = in[index_in]; tile[index_tile+1] = in[index_in+1]; tile[index_tile+2] = in[index_in+2]; } __syncthreads(); int index_out = out_corner_i*height + y * height * CHANNEL + height * CHANNEL - (x+1)*CHANNEL - out_corner_j*CHANNEL; // Read from shared mem, coalesced write to global mem: if(index_tile < total_tile_size && index_tile >= 0 && index_out < image_size && index_out >= 0 && index_in < image_size && index_in >=0 ){ out[index_out] = tile[index_tile]; out[index_out + 1] = tile[index_tile + 1]; out[index_out + 2] = tile[index_tile + 2]; } } int main(int argc, char **argv){ if (argc != 3){ printf("Usage: exec filename kernel\n"); exit(1); } char *fname = argv[1]; char kname[100] = ""; // kernel name int choice = atoi(argv[2]); // kernel choice Image source; int max_col_val; GpuTimer timer; unsigned char *d_in, *d_out; // Read the input file if (readInpImg(fname, source, max_col_val) != 0) exit(1); source.dev_img = (unsigned char *)malloc(source.bytes); CHECK(hipMalloc(&d_in, source.bytes)); CHECK(hipMalloc(&d_out, source.bytes)); CHECK(hipMemcpy(d_in, source.img, source.bytes, hipMemcpyHostToDevice)); // Run selected kernel switch (choice) { case 1: // Serial execution on GPU, i.e. 
creating ONLY ONE thread strcpy(kname, "Serial execution on GPU"); timer.Start(); hipLaunchKernelGGL(( rotate_90_serial) , dim3(1), dim3(1), 0, 0, d_in, d_out, source.height, source.width); timer.Stop(); CHECK(hipMemcpy(source.dev_img, d_out, source.bytes, hipMemcpyDeviceToHost)); break; case 2: { // One thread per pixel strcpy(kname, "One thread per pixel"); int k = 32; dim3 blocks(ceil((float)source.height / (float)k), ceil((float)source.width / (float)k)); // blocks per grid (using ceil in case height or width are not multiple of k) dim3 threads(k, k); // threads per block timer.Start(); hipLaunchKernelGGL(( rotate_90_parallel_per_element) , dim3(blocks), dim3(threads), 0, 0, d_in, d_out, source.height, source.width); timer.Stop(); CHECK(hipMemcpy(source.dev_img, d_out, source.bytes, hipMemcpyDeviceToHost)); break; } case 3: { // One thread per pixel - tiled (16 X 16) strcpy(kname, "One thread per pixel - tiled (16 X 16)"); int k = 16; // tile size is k x k unsigned int shmem_size = k * k * CHANNEL * sizeof(unsigned char); dim3 blocks(ceil((float)source.height / (float)k), ceil((float)source.width / (float)k)); // blocks per grid dim3 threads(k, k); // threads per block timer.Start(); hipLaunchKernelGGL(( rotate_90_parallel_per_element_tiled) , dim3(blocks), dim3(threads), shmem_size, 0, d_in, d_out, source.height, source.width, k); timer.Stop(); CHECK(hipMemcpy(source.dev_img, d_out, source.bytes, hipMemcpyDeviceToHost)); break; } case 4: { // One thread per matrix element - tiled (16x16) - no shared mem conflict strcpy(kname, "One thread per matrix element - tiled (16x16) - no shared mem conflict"); int k = 16; // tile size is k x k dim3 blocks(ceil((float)source.height / (float)k), ceil((float)source.width / (float)k)); // blocks per grid dim3 threads(k, k); // threads per block unsigned int shmem_size = k * (k+1) * CHANNEL * sizeof(unsigned char); timer.Start(); hipLaunchKernelGGL(( rotate_90_parallel_per_element_tiled_padded) , dim3(blocks), dim3(threads), shmem_size, 0, d_in, d_out, source.height, source.width, k); timer.Stop(); CHECK(hipMemcpy(source.dev_img, d_out, source.bytes, hipMemcpyDeviceToHost)); break; } default: printf("Choose a kernel between 1 and 4"); exit(1); } printf("\nRotating Image \"%s\" with Height = %d and Width = %d.\nUsing kernel %d: %s\nElapsed time: %g ms.\n\n", argv[1], source.height, source.width, choice, kname, timer.Elapsed()); // Swap height and width for the rotated image int temp = source.height; source.height = source.width; source.width = temp; // Write the output file if (writeOutImg("rotated.ppm", source, max_col_val) != 0) // For demonstration, the input file is written to a new file named "rotated.ppm" exit(1); // free up the allocated memory free(source.img); free(source.dev_img); CHECK(hipFree(d_in)); CHECK(hipFree(d_out)); exit(0); }
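// Both the CPU routine and every kernel in the file above use the same index arithmetic:
// input pixel (i, j) of an H x W image is written to output pixel (j, H - 1 - i) of a
// W x H image, i.e. a 90-degree clockwise rotation. A small host-only sketch of that
// mapping (the helper name is illustrative, not part of the original file):
static inline int rot90_out_index(int i, int j, int height, int channels) {
    // Identical to index_out above, j * height * CHANNEL + height * CHANNEL - (i + 1) * CHANNEL,
    // regrouped as (row j, column height - 1 - i) of the rotated image.
    return j * height * channels + (height - 1 - i) * channels;
}
// Example: for a 2 x 3 single-channel image, pixel (0, 0) maps to offset 1,
// i.e. row 0, column 1 of the rotated 3 x 2 output.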
fa715746dd9313196f754a051861a3967edf5647.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #define CHECK(e) { int res = (e); if (res) printf("CUDA ERROR %d\n", res); } #define CHANNEL 3 // Struct for measuring performance struct GpuTimer{ cudaEvent_t start; cudaEvent_t stop; GpuTimer(){ cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer(){ cudaEventDestroy(start); cudaEventDestroy(stop); } void Start(){ cudaEventRecord(start, 0); } void Stop(){ cudaEventRecord(stop, 0); } float Elapsed(){ float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; struct Image { int width; int height; unsigned int bytes; unsigned char *img; unsigned char *dev_img; }; // Reads a color ppm image file and saves the data in the provided Image structure. // The max_col_val is set to the value read from the input file. // This is used later for writing the output image. int readInpImg(const char * fname, Image & source, int & max_col_val) { FILE *src; if (!(src = fopen(fname, "rb"))){ printf("Couldn't open file %s for reading.\n", fname); return 1; } char p, s; fscanf(src, "%c%c\n", &p, &s); if (p != 'P' || s != '6'){ // Is it a valid format? printf("Not a valid PPM file (%c %c)\n", p, s); exit(1); } fscanf(src, "%d %d\n", &source.width, &source.height); fscanf(src, "%d\n", &max_col_val); int pixels = source.width * source.height; source.bytes = pixels * CHANNEL; // CHANNEL = 3 => colored image with r, g, and b channels source.img = (unsigned char *)malloc(source.bytes); if (fread(source.img, sizeof(unsigned char), source.bytes, src) != source.bytes){ printf("Error reading file.\n"); exit(1); } fclose(src); return 0; } // Write a color ppm image into a file. // Image structure represents the image in the memory. int writeOutImg(const char * fname, const Image & rotated, const int max_col_val) { FILE *out; if (!(out = fopen(fname, "wb"))){ printf("Couldn't open file for output.\n"); return 1; } fprintf(out, "P6\n%d %d\n%d\n", rotated.width, rotated.height, max_col_val); if (fwrite(rotated.dev_img, sizeof(unsigned char), rotated.bytes, out) != rotated.bytes){ printf("Error writing file.\n"); return 1; } fclose(out); return 0; } // To be launched on CPU void rotate_90_CPU(unsigned char in[], unsigned char out[], int height, int width) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { int index_in = i * width * CHANNEL + j * CHANNEL; int index_out = j * height * CHANNEL + height * CHANNEL - (i + 1) * CHANNEL; out[index_out] = in[index_in]; out[index_out + 1] = in[index_in + 1]; out[index_out + 2] = in[index_in + 2]; } } } // To be launched on a single thread __global__ void rotate_90_serial(unsigned char in[], unsigned char out[], int height, int width) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { int index_in = i * width * CHANNEL + j * CHANNEL; int index_out = j * height * CHANNEL + height * CHANNEL - (i + 1) * CHANNEL; out[index_out] = in[index_in]; out[index_out + 1] = in[index_in + 1]; out[index_out + 2] = in[index_in + 2]; } } } // To be launched with one thread per element __global__ void rotate_90_parallel_per_element(unsigned char in[], unsigned char out[], int height, int width) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int index_in = i * width * CHANNEL + j * CHANNEL; int index_out = j * height * CHANNEL + height * CHANNEL - (i + 1) * CHANNEL; int image_size = height * width * CHANNEL; if(index_in < image_size && index_in >= 0 && index_out < image_size && index_out >= 0){ 
out[index_out] = in[index_in]; out[index_out + 1] = in[index_in + 1]; out[index_out + 2] = in[index_in + 2]; } } // To be launched with one thread per element. Thread blocks read & write tiles in coalesced fashion. __global__ void rotate_90_parallel_per_element_tiled(unsigned char in[], unsigned char out[], int height, int width, unsigned int tile_size) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * tile_size, in_corner_j = blockIdx.y * tile_size * CHANNEL; int out_corner_i = blockIdx.y * tile_size * CHANNEL, out_corner_j = blockIdx.x * tile_size; int x = threadIdx.x, y = threadIdx.y; int index_tile = x * (tile_size)*CHANNEL + y * CHANNEL; int index_in = in_corner_i * width * CHANNEL + x * width *CHANNEL + in_corner_j + y * CHANNEL; int image_size = height * width * CHANNEL; int total_tile_size = tile_size * tile_size * CHANNEL; extern __shared__ unsigned char tile[]; // Coalesced read from global mem, rotated write into shared mem: if(index_tile < total_tile_size && index_tile >= 0 && index_in < image_size && index_in >= 0){ tile[index_tile] = in[index_in]; tile[index_tile+1] = in[index_in+1]; tile[index_tile+2] = in[index_in+2]; } __syncthreads(); int index_out = out_corner_i*height + y * height * CHANNEL + height * CHANNEL - (x+1) * CHANNEL - out_corner_j * CHANNEL; // Read from shared mem, coalesced write to global mem: if(index_tile < total_tile_size && index_tile >= 0 && index_out < image_size && index_out >= 0 && index_in < image_size && index_in >= 0){ out[index_out] = tile[index_tile]; out[index_out + 1] = tile[index_tile + 1]; out[index_out + 2] = tile[index_tile + 2]; } } // To be launched with one thread per element. Thread blocks read & write tiles in coalesced fashion. // Shared memory array padded to avoid bank conflicts. 
__global__ void rotate_90_parallel_per_element_tiled_padded(unsigned char in[], unsigned char out[], int height, int width, unsigned int tile_size) { // (i,j) locations of the tile corners for input & output matrices: int in_corner_i = blockIdx.x * tile_size, in_corner_j = blockIdx.y * tile_size * CHANNEL; int out_corner_i = blockIdx.y * tile_size * CHANNEL, out_corner_j = blockIdx.x * tile_size; int x = threadIdx.x, y = threadIdx.y; int image_size = height * width * CHANNEL; int total_tile_size = tile_size * (tile_size + 1) * CHANNEL ; int index_tile = x * (tile_size + 1)*CHANNEL + y * CHANNEL; int index_in = in_corner_i * width * CHANNEL + x * width *CHANNEL + in_corner_j + y * CHANNEL; extern __shared__ unsigned char tile[]; // coalesced read from global mem, rotated write into shared mem: if(index_tile < total_tile_size && index_tile >= 0 && index_in < image_size){ tile[index_tile] = in[index_in]; tile[index_tile+1] = in[index_in+1]; tile[index_tile+2] = in[index_in+2]; } __syncthreads(); int index_out = out_corner_i*height + y * height * CHANNEL + height * CHANNEL - (x+1)*CHANNEL - out_corner_j*CHANNEL; // Read from shared mem, coalesced write to global mem: if(index_tile < total_tile_size && index_tile >= 0 && index_out < image_size && index_out >= 0 && index_in < image_size && index_in >=0 ){ out[index_out] = tile[index_tile]; out[index_out + 1] = tile[index_tile + 1]; out[index_out + 2] = tile[index_tile + 2]; } } int main(int argc, char **argv){ if (argc != 3){ printf("Usage: exec filename kernel\n"); exit(1); } char *fname = argv[1]; char kname[100] = ""; // kernel name int choice = atoi(argv[2]); // kernel choice Image source; int max_col_val; GpuTimer timer; unsigned char *d_in, *d_out; // Read the input file if (readInpImg(fname, source, max_col_val) != 0) exit(1); source.dev_img = (unsigned char *)malloc(source.bytes); CHECK(cudaMalloc(&d_in, source.bytes)); CHECK(cudaMalloc(&d_out, source.bytes)); CHECK(cudaMemcpy(d_in, source.img, source.bytes, cudaMemcpyHostToDevice)); // Run selected kernel switch (choice) { case 1: // Serial execution on GPU, i.e. 
creating ONLY ONE thread strcpy(kname, "Serial execution on GPU"); timer.Start(); rotate_90_serial <<<1, 1>>> (d_in, d_out, source.height, source.width); timer.Stop(); CHECK(cudaMemcpy(source.dev_img, d_out, source.bytes, cudaMemcpyDeviceToHost)); break; case 2: { // One thread per pixel strcpy(kname, "One thread per pixel"); int k = 32; dim3 blocks(ceil((float)source.height / (float)k), ceil((float)source.width / (float)k)); // blocks per grid (using ceil in case height or width are not multiple of k) dim3 threads(k, k); // threads per block timer.Start(); rotate_90_parallel_per_element <<<blocks, threads>>> (d_in, d_out, source.height, source.width); timer.Stop(); CHECK(cudaMemcpy(source.dev_img, d_out, source.bytes, cudaMemcpyDeviceToHost)); break; } case 3: { // One thread per pixel - tiled (16 X 16) strcpy(kname, "One thread per pixel - tiled (16 X 16)"); int k = 16; // tile size is k x k unsigned int shmem_size = k * k * CHANNEL * sizeof(unsigned char); dim3 blocks(ceil((float)source.height / (float)k), ceil((float)source.width / (float)k)); // blocks per grid dim3 threads(k, k); // threads per block timer.Start(); rotate_90_parallel_per_element_tiled <<<blocks, threads, shmem_size>>> (d_in, d_out, source.height, source.width, k); timer.Stop(); CHECK(cudaMemcpy(source.dev_img, d_out, source.bytes, cudaMemcpyDeviceToHost)); break; } case 4: { // One thread per matrix element - tiled (16x16) - no shared mem conflict strcpy(kname, "One thread per matrix element - tiled (16x16) - no shared mem conflict"); int k = 16; // tile size is k x k dim3 blocks(ceil((float)source.height / (float)k), ceil((float)source.width / (float)k)); // blocks per grid dim3 threads(k, k); // threads per block unsigned int shmem_size = k * (k+1) * CHANNEL * sizeof(unsigned char); timer.Start(); rotate_90_parallel_per_element_tiled_padded <<<blocks, threads, shmem_size>>> (d_in, d_out, source.height, source.width, k); timer.Stop(); CHECK(cudaMemcpy(source.dev_img, d_out, source.bytes, cudaMemcpyDeviceToHost)); break; } default: printf("Choose a kernel between 1 and 4"); exit(1); } printf("\nRotating Image \"%s\" with Height = %d and Width = %d.\nUsing kernel %d: %s\nElapsed time: %g ms.\n\n", argv[1], source.height, source.width, choice, kname, timer.Elapsed()); // Swap height and width for the rotated image int temp = source.height; source.height = source.width; source.width = temp; // Write the output file if (writeOutImg("rotated.ppm", source, max_col_val) != 0) // For demonstration, the input file is written to a new file named "rotated.ppm" exit(1); // free up the allocated memory free(source.img); free(source.dev_img); CHECK(cudaFree(d_in)); CHECK(cudaFree(d_out)); exit(0); }
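// The .hip and .cu versions of this file differ essentially in the hipify banner and
// hip_runtime include, the hip*-prefixed runtime calls, and the kernel-launch syntax. A
// minimal sketch of the two launch forms for the serial kernel defined above (the wrapper
// function and the __HIPCC__ guard are illustrative, not part of either file):
static void launch_serial_rotate(unsigned char* d_in, unsigned char* d_out,
                                 int height, int width) {
#ifdef __HIPCC__
    // HIP form used in the .hip file: kernel, grid, block, shared-memory bytes, stream, arguments.
    hipLaunchKernelGGL(rotate_90_serial, dim3(1), dim3(1), 0, 0, d_in, d_out, height, width);
#else
    // CUDA form used in the .cu file: <<<grid, block[, sharedMemBytes, stream]>>>.
    rotate_90_serial<<<1, 1>>>(d_in, d_out, height, width);
#endif
}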
b426cc3215f71bc34ea2cfe3b3283ce9caf6681d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <algorithm> #include <iostream> #ifdef _WIN32 #include <numeric> #endif #include <random> #define PADDLE_CUDA_FP16 #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/device/gpu/gpu_helper.h" using paddle::platform::PADDLE_CUDA_NUM_THREADS; using paddle::platform::float16; template <typename T> __global__ void AddKernel(const T* data_a, T* data_b, size_t num) { CUDA_KERNEL_LOOP(i, num) { paddle::platform::CudaAtomicAdd(&data_b[i], data_a[i]); } } template <typename T> struct AddFunctor { T operator()(const T& a, const T& b) { return a + b; } }; template <typename T> void TestCase(size_t num) { T *in1, *in2, *out; T *d_in1, *d_in2; size_t size = sizeof(T) * num; #ifdef PADDLE_WITH_HIP hipMalloc(reinterpret_cast<void**>(&d_in1), size); hipMalloc(reinterpret_cast<void**>(&d_in2), size); #else hipMalloc(reinterpret_cast<void**>(&d_in1), size); hipMalloc(reinterpret_cast<void**>(&d_in2), size); #endif in1 = reinterpret_cast<T*>(malloc(size)); in2 = reinterpret_cast<T*>(malloc(size)); out = reinterpret_cast<T*>(malloc(size)); std::minstd_rand engine; std::uniform_real_distribution<double> dist(0.0, 1.0); for (size_t i = 0; i < num; ++i) { in1[i] = static_cast<T>(dist(engine)); in2[i] = static_cast<T>(dist(engine)); } #ifdef PADDLE_WITH_HIP hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num); hipDeviceSynchronize(); hipMemcpy(out, d_in2, size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); #else hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( AddKernel<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num); hipDeviceSynchronize(); hipMemcpy(out, d_in2, size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); #endif for (size_t i = 0; i < num; ++i) { // NOTE(dzhwinter): the float16 add has small underflow/overflow // so we use EXPECT_NEAR to check the result. 
EXPECT_NEAR(static_cast<float>(out[i]), static_cast<float>(AddFunctor<T>()(in1[i], in2[i])), 0.001); } free(in1); free(in2); free(out); #ifdef PADDLE_WITH_HIP hipFree(d_in1); hipFree(d_in2); #else hipFree(d_in1); hipFree(d_in2); #endif } // cuda primitives TEST(CudaAtomic, Add) { TestCase<float>(static_cast<size_t>(10)); TestCase<float>(static_cast<size_t>(1024 * 1024)); TestCase<double>(static_cast<size_t>(10)); TestCase<double>(static_cast<size_t>(1024 * 1024)); } TEST(CudaAtomic, float16) { TestCase<float16>(static_cast<size_t>(1)); TestCase<float16>(static_cast<size_t>(2)); TestCase<float16>(static_cast<size_t>(3)); TestCase<float16>(static_cast<size_t>(10)); TestCase<float16>(static_cast<size_t>(1024 * 1024)); } // unalignment of uint8 void TestUnalign(size_t num, const int shift_bit) { ASSERT_EQ(num % 2, 0); float16 *in1, *in2, *out; float16 *d_in1, *d_in2; size_t size = sizeof(uint8_t) * (num + shift_bit); size_t array_size = sizeof(float16) * (num / 2); #ifdef PADDLE_WITH_HIP hipMalloc(reinterpret_cast<void**>(&d_in1), size); hipMalloc(reinterpret_cast<void**>(&d_in2), size); #else hipMalloc(reinterpret_cast<void**>(&d_in1), size); hipMalloc(reinterpret_cast<void**>(&d_in2), size); #endif in1 = reinterpret_cast<float16*>(malloc(size)); in2 = reinterpret_cast<float16*>(malloc(size)); out = reinterpret_cast<float16*>(malloc(size)); // right shift 1, mimic the unalignment of address float16* r_in1 = reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in1) + shift_bit); float16* r_in2 = reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in2) + shift_bit); std::minstd_rand engine; std::uniform_real_distribution<double> dist(0.0, 1.0); for (size_t i = 0; i < num / 2; ++i) { r_in1[i] = static_cast<float16>(dist(engine)); r_in2[i] = static_cast<float16>(dist(engine)); } #ifdef PADDLE_WITH_HIP hipMemcpy(d_in1, r_in1, array_size, hipMemcpyHostToDevice); hipMemcpy(d_in2, r_in2, array_size, hipMemcpyHostToDevice); hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<float16>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num / 2); hipDeviceSynchronize(); hipMemcpy(out, d_in2, array_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); #else hipMemcpy(d_in1, r_in1, array_size, hipMemcpyHostToDevice); hipMemcpy(d_in2, r_in2, array_size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( AddKernel<float16>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num / 2); hipDeviceSynchronize(); hipMemcpy(out, d_in2, array_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); #endif for (size_t i = 0; i < num / 2; ++i) { // NOTE(dzhwinter): the float16 add has small truncate error. // so we use EXPECT_NEAR to check the result. EXPECT_NEAR(static_cast<float>(out[i]), static_cast<float>(AddFunctor<float16>()(r_in1[i], r_in2[i])), 0.001); } free(in1); free(in2); free(out); #ifdef PADDLE_WITH_HIP hipFree(d_in1); hipFree(d_in2); #else hipFree(d_in1); hipFree(d_in2); #endif } TEST(CudaAtomic, float16Unalign) { // same with float16 testcase TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 2); TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 2); TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 2); // shift the address. 
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 1); TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 1); TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 1); TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 3); TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 3); TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 3); } // https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ template <typename T> static __forceinline__ __device__ T WarpReduceSum(T val) { unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int offset = warpSize / 2; offset > 0; offset /= 2) { val += paddle::platform::CudaShuffleDownSync(mask, val, offset); } return val; } template <typename T> __forceinline__ __device__ T BlockReduce(T val) { static __shared__ T shared[32]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = WarpReduceSum(val); // Each warp performs partial reduction if (lane == 0) shared[wid] = val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions // read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : static_cast<T>(0); if (wid == 0) val = WarpReduceSum(val); // Final reduce within first warp return val; } template <typename T> __global__ void DeviceReduceSum(T* in, T* out, size_t N) { T sum(0); CUDA_KERNEL_LOOP(i, N) { sum += in[i]; } sum = BlockReduce<T>(sum); __syncthreads(); if (threadIdx.x == 0) out[blockIdx.x] = sum; } template <typename T> void TestReduce(size_t num, float atol = 0.01) { T* in1; T *d_in1, *d_in2; size_t size = sizeof(T) * num; #ifdef PADDLE_WITH_HIP hipMalloc(reinterpret_cast<void**>(&d_in1), size); hipMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T)); #else hipMalloc(reinterpret_cast<void**>(&d_in1), size); hipMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T)); #endif in1 = reinterpret_cast<T*>(malloc(size)); std::minstd_rand engine; std::uniform_real_distribution<double> dist(0.0, 1.0); for (size_t i = 0; i < num; ++i) { in1[i] = static_cast<T>(dist(engine)); } auto out = std::accumulate(in1, in1 + num, static_cast<T>(0)); #ifdef PADDLE_WITH_HIP hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(HIP_KERNEL_NAME(DeviceReduceSum<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num); hipMemcpy(in1, d_in2, sizeof(T), hipMemcpyDeviceToHost); hipDeviceSynchronize(); #else hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(( DeviceReduceSum<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num); hipMemcpy(in1, d_in2, sizeof(T), hipMemcpyDeviceToHost); hipDeviceSynchronize(); #endif // NOTE(dzhwinter): the float16 add has small underflow/overflow // so we use EXPECT_NEAR to check the result. EXPECT_NEAR(static_cast<float>(in1[0]), static_cast<float>(out), atol); free(in1); #ifdef PADDLE_WITH_HIP hipFree(d_in1); hipFree(d_in2); #else hipFree(d_in1); hipFree(d_in2); #endif } TEST(CudaShuffleSync, float16) { TestReduce<float>(10); TestReduce<float>(1000); // float16 will overflow or accumulate truncate errors in big size. TestReduce<float16>(10); TestReduce<float16>(100, /*atol error*/ 1.0); }
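// WarpReduceSum above reduces within a warp through paddle::platform::CudaShuffleDownSync,
// which wraps the platform's warp-shuffle intrinsic. A standalone sketch of the same idea
// written directly against the CUDA intrinsic (an illustration, not the Paddle wrapper):
__device__ __forceinline__ float warp_reduce_sum_sketch(float val) {
    const unsigned full_mask = 0xffffffffu;  // every lane of the warp participates
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        val += __shfl_down_sync(full_mask, val, offset);
    }
    return val;  // after the loop, lane 0 holds the sum over the whole warp
}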
b426cc3215f71bc34ea2cfe3b3283ce9caf6681d.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <algorithm> #include <iostream> #ifdef _WIN32 #include <numeric> #endif #include <random> #define PADDLE_CUDA_FP16 #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/device/gpu/gpu_helper.h" using paddle::platform::PADDLE_CUDA_NUM_THREADS; using paddle::platform::float16; template <typename T> __global__ void AddKernel(const T* data_a, T* data_b, size_t num) { CUDA_KERNEL_LOOP(i, num) { paddle::platform::CudaAtomicAdd(&data_b[i], data_a[i]); } } template <typename T> struct AddFunctor { T operator()(const T& a, const T& b) { return a + b; } }; template <typename T> void TestCase(size_t num) { T *in1, *in2, *out; T *d_in1, *d_in2; size_t size = sizeof(T) * num; #ifdef PADDLE_WITH_HIP hipMalloc(reinterpret_cast<void**>(&d_in1), size); hipMalloc(reinterpret_cast<void**>(&d_in2), size); #else cudaMalloc(reinterpret_cast<void**>(&d_in1), size); cudaMalloc(reinterpret_cast<void**>(&d_in2), size); #endif in1 = reinterpret_cast<T*>(malloc(size)); in2 = reinterpret_cast<T*>(malloc(size)); out = reinterpret_cast<T*>(malloc(size)); std::minstd_rand engine; std::uniform_real_distribution<double> dist(0.0, 1.0); for (size_t i = 0; i < num; ++i) { in1[i] = static_cast<T>(dist(engine)); in2[i] = static_cast<T>(dist(engine)); } #ifdef PADDLE_WITH_HIP hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num); hipDeviceSynchronize(); hipMemcpy(out, d_in2, size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); #else cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); AddKernel<T><<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num); cudaDeviceSynchronize(); cudaMemcpy(out, d_in2, size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); #endif for (size_t i = 0; i < num; ++i) { // NOTE(dzhwinter): the float16 add has small underflow/overflow // so we use EXPECT_NEAR to check the result. 
EXPECT_NEAR(static_cast<float>(out[i]), static_cast<float>(AddFunctor<T>()(in1[i], in2[i])), 0.001); } free(in1); free(in2); free(out); #ifdef PADDLE_WITH_HIP hipFree(d_in1); hipFree(d_in2); #else cudaFree(d_in1); cudaFree(d_in2); #endif } // cuda primitives TEST(CudaAtomic, Add) { TestCase<float>(static_cast<size_t>(10)); TestCase<float>(static_cast<size_t>(1024 * 1024)); TestCase<double>(static_cast<size_t>(10)); TestCase<double>(static_cast<size_t>(1024 * 1024)); } TEST(CudaAtomic, float16) { TestCase<float16>(static_cast<size_t>(1)); TestCase<float16>(static_cast<size_t>(2)); TestCase<float16>(static_cast<size_t>(3)); TestCase<float16>(static_cast<size_t>(10)); TestCase<float16>(static_cast<size_t>(1024 * 1024)); } // unalignment of uint8 void TestUnalign(size_t num, const int shift_bit) { ASSERT_EQ(num % 2, 0); float16 *in1, *in2, *out; float16 *d_in1, *d_in2; size_t size = sizeof(uint8_t) * (num + shift_bit); size_t array_size = sizeof(float16) * (num / 2); #ifdef PADDLE_WITH_HIP hipMalloc(reinterpret_cast<void**>(&d_in1), size); hipMalloc(reinterpret_cast<void**>(&d_in2), size); #else cudaMalloc(reinterpret_cast<void**>(&d_in1), size); cudaMalloc(reinterpret_cast<void**>(&d_in2), size); #endif in1 = reinterpret_cast<float16*>(malloc(size)); in2 = reinterpret_cast<float16*>(malloc(size)); out = reinterpret_cast<float16*>(malloc(size)); // right shift 1, mimic the unalignment of address float16* r_in1 = reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in1) + shift_bit); float16* r_in2 = reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in2) + shift_bit); std::minstd_rand engine; std::uniform_real_distribution<double> dist(0.0, 1.0); for (size_t i = 0; i < num / 2; ++i) { r_in1[i] = static_cast<float16>(dist(engine)); r_in2[i] = static_cast<float16>(dist(engine)); } #ifdef PADDLE_WITH_HIP hipMemcpy(d_in1, r_in1, array_size, hipMemcpyHostToDevice); hipMemcpy(d_in2, r_in2, array_size, hipMemcpyHostToDevice); hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<float16>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num / 2); hipDeviceSynchronize(); hipMemcpy(out, d_in2, array_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); #else cudaMemcpy(d_in1, r_in1, array_size, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, r_in2, array_size, cudaMemcpyHostToDevice); AddKernel<float16><<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num / 2); cudaDeviceSynchronize(); cudaMemcpy(out, d_in2, array_size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); #endif for (size_t i = 0; i < num / 2; ++i) { // NOTE(dzhwinter): the float16 add has small truncate error. // so we use EXPECT_NEAR to check the result. EXPECT_NEAR(static_cast<float>(out[i]), static_cast<float>(AddFunctor<float16>()(r_in1[i], r_in2[i])), 0.001); } free(in1); free(in2); free(out); #ifdef PADDLE_WITH_HIP hipFree(d_in1); hipFree(d_in2); #else cudaFree(d_in1); cudaFree(d_in2); #endif } TEST(CudaAtomic, float16Unalign) { // same with float16 testcase TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 2); TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 2); TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 2); // shift the address. 
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 1); TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 1); TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 1); TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 3); TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 3); TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 3); } // https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ template <typename T> static __forceinline__ __device__ T WarpReduceSum(T val) { unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int offset = warpSize / 2; offset > 0; offset /= 2) { val += paddle::platform::CudaShuffleDownSync(mask, val, offset); } return val; } template <typename T> __forceinline__ __device__ T BlockReduce(T val) { static __shared__ T shared[32]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = WarpReduceSum(val); // Each warp performs partial reduction if (lane == 0) shared[wid] = val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions // read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : static_cast<T>(0); if (wid == 0) val = WarpReduceSum(val); // Final reduce within first warp return val; } template <typename T> __global__ void DeviceReduceSum(T* in, T* out, size_t N) { T sum(0); CUDA_KERNEL_LOOP(i, N) { sum += in[i]; } sum = BlockReduce<T>(sum); __syncthreads(); if (threadIdx.x == 0) out[blockIdx.x] = sum; } template <typename T> void TestReduce(size_t num, float atol = 0.01) { T* in1; T *d_in1, *d_in2; size_t size = sizeof(T) * num; #ifdef PADDLE_WITH_HIP hipMalloc(reinterpret_cast<void**>(&d_in1), size); hipMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T)); #else cudaMalloc(reinterpret_cast<void**>(&d_in1), size); cudaMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T)); #endif in1 = reinterpret_cast<T*>(malloc(size)); std::minstd_rand engine; std::uniform_real_distribution<double> dist(0.0, 1.0); for (size_t i = 0; i < num; ++i) { in1[i] = static_cast<T>(dist(engine)); } auto out = std::accumulate(in1, in1 + num, static_cast<T>(0)); #ifdef PADDLE_WITH_HIP hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(HIP_KERNEL_NAME(DeviceReduceSum<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num); hipMemcpy(in1, d_in2, sizeof(T), hipMemcpyDeviceToHost); hipDeviceSynchronize(); #else cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); DeviceReduceSum<T><<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num); cudaMemcpy(in1, d_in2, sizeof(T), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); #endif // NOTE(dzhwinter): the float16 add has small underflow/overflow // so we use EXPECT_NEAR to check the result. EXPECT_NEAR(static_cast<float>(in1[0]), static_cast<float>(out), atol); free(in1); #ifdef PADDLE_WITH_HIP hipFree(d_in1); hipFree(d_in2); #else cudaFree(d_in1); cudaFree(d_in2); #endif } TEST(CudaShuffleSync, float16) { TestReduce<float>(10); TestReduce<float>(1000); // float16 will overflow or accumulate truncate errors in big size. TestReduce<float16>(10); TestReduce<float16>(100, /*atol error*/ 1.0); }
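// AddKernel and DeviceReduceSum above iterate with CUDA_KERNEL_LOOP, defined in the included
// gpu_primitives header (not shown here). Conventionally that macro expands to a grid-stride
// loop, so one fixed launch shape covers any element count; a sketch under that assumption,
// using the plain float atomicAdd that the float test cases exercise:
__global__ void add_kernel_sketch(const float* a, float* b, size_t num) {
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num;
         i += static_cast<size_t>(blockDim.x) * gridDim.x) {
        atomicAdd(&b[i], a[i]);  // element-wise atomic accumulate into b
    }
}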
d5a917c472f76d52a405f1eb9b5dc40b145401bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * This algorithm was adapted from SegAlign's Ungapped Extender authored by * Sneha Goenka ([email protected]) and Yatish Turakhia ([email protected]). * Source code for original implementation and use in SegAlign can be found * here: https://github.com/gsneha26/SegAlign * Description of the algorithm and original implementation can be found in the SegAlign * paper published in SC20 (https://doi.ieeecomputersociety.org/10.1109/SC41405.2020.00043) */ #include "ungapped_xdrop.cuh" #include "ungapped_xdrop_kernels.cuh" #include <claraparabricks/genomeworks/utils/pinned_host_vector.hpp> #include <claraparabricks/genomeworks/utils/cudautils.hpp> #include <thrust/system/hip/execution_policy.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <hipcub/hipcub.hpp> namespace claraparabricks { namespace genomeworks { namespace cudaextender { using namespace cudautils; UngappedXDrop::UngappedXDrop(const int32_t* h_score_mat, const int32_t score_mat_dim, const int32_t xdrop_threshold, const bool no_entropy, hipStream_t stream, const int32_t device_id, DefaultDeviceAllocator allocator) : h_score_mat_(h_score_mat, h_score_mat + score_mat_dim) , score_mat_dim_(score_mat_dim) , xdrop_threshold_(xdrop_threshold) , no_entropy_(no_entropy) , stream_(stream) , device_id_(device_id) , host_ptr_api_mode_(false) , allocator_(allocator) { // Switch to device for copying over initial structures scoped_device_switch dev(device_id_); // Calculate the max limits on the number of extensions we can do on this GPU hipDeviceProp_t device_prop; hipGetDeviceProperties(&device_prop, device_id_); // TODO - Currently element and memory limits are artifacts of hardcoded global memory limits in // SegAlign. To be replaced with actual calculation of memory requirements with sizes of // datastructures taken into consideration. Also currently the max limit is based on total // global memory, which should be replaced with memory available from the passed in allocator. 
// Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/576 const int32_t max_ungapped_per_gb = 4194304; const float global_mem_gb = static_cast<float>(device_prop.totalGlobalMem) / 1073741824.0f; batch_max_ungapped_extensions_ = static_cast<int32_t>(global_mem_gb) * max_ungapped_per_gb; //Figure out memory requirements for cub functions size_t cub_storage_bytes = 0; GW_CU_CHECK_ERR(hipcub::DeviceScan::InclusiveSum(nullptr, cub_storage_bytes, d_done_.data(), d_done_.data(), batch_max_ungapped_extensions_, stream_)); // Allocate space on device for scoring matrix and intermediate results d_score_mat_ = device_buffer<int32_t>(score_mat_dim_, allocator_, stream_); d_done_ = device_buffer<int32_t>(batch_max_ungapped_extensions_, allocator_, stream_); d_tmp_ssp_ = device_buffer<ScoredSegmentPair>(batch_max_ungapped_extensions_, allocator_, stream_); d_temp_storage_cub_ = device_buffer<int8_t>(cub_storage_bytes, allocator_, stream_); device_copy_n_async(h_score_mat_.data(), score_mat_dim_, d_score_mat_.data(), stream_); } StatusType UngappedXDrop::extend_async(const int8_t* d_query, const int32_t query_length, const int8_t* d_target, const int32_t target_length, const int32_t score_threshold, const SeedPair* d_seed_pairs, const int32_t num_seed_pairs, ScoredSegmentPair* d_scored_segment_pairs, int32_t* d_num_scored_segment_pairs) { if (d_query == nullptr || d_target == nullptr || d_seed_pairs == nullptr) { GW_LOG_ERROR("Invalid input pointers"); return StatusType::invalid_input; } if (d_scored_segment_pairs == nullptr || d_num_scored_segment_pairs == nullptr) { GW_LOG_ERROR("Invalid output pointers"); return StatusType::invalid_input; } // Switch to configured GPU scoped_device_switch dev(device_id_); total_scored_segment_pairs_ = 0; for (int32_t seed_pair_start = 0; seed_pair_start < num_seed_pairs; seed_pair_start += batch_max_ungapped_extensions_) { // TODO - Kernel optimizations [Unnecessary memset?] // Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/579 GW_CU_CHECK_ERR(hipMemsetAsync((void*)d_done_.data(), 0, batch_max_ungapped_extensions_ * sizeof(int32_t), stream_)); GW_CU_CHECK_ERR(hipMemsetAsync((void*)d_tmp_ssp_.data(), 0, batch_max_ungapped_extensions_ * sizeof(ScoredSegmentPair), stream_)); const int32_t curr_num_pairs = ::min(batch_max_ungapped_extensions_, num_seed_pairs - seed_pair_start); hipLaunchKernelGGL(( find_high_scoring_segment_pairs), dim3(1024), dim3(128), 0, stream_, d_target, target_length, d_query, query_length, d_score_mat_.data(), no_entropy_, xdrop_threshold_, score_threshold, d_seed_pairs, curr_num_pairs, seed_pair_start, d_scored_segment_pairs, d_done_.data()); size_t cub_storage_bytes = d_temp_storage_cub_.size(); GW_CU_CHECK_ERR(hipcub::DeviceScan::InclusiveSum(d_temp_storage_cub_.data(), cub_storage_bytes, d_done_.data(), d_done_.data(), curr_num_pairs, stream_)) // TODO- Make output compression async. Currently synchronocity is arising due to // thrust::stable_sort. Dynamic parallelism or an equivalent sort with cub can be used // Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/578 const int32_t num_scored_segment_pairs = get_value_from_device(d_done_.data() + curr_num_pairs - 1, stream_); if (num_scored_segment_pairs > 0) { // TODO - Explore scaling up/down launch config based on workload. 
Also explore making // this accessible to the user for configuration // Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/577 hipLaunchKernelGGL(( compress_output), dim3(1024), dim3(1024), 0, stream_, d_done_.data(), seed_pair_start, d_scored_segment_pairs, d_tmp_ssp_.data(), curr_num_pairs); thrust::stable_sort(thrust::hip::par(allocator_).on(stream_), d_tmp_ssp_.begin(), d_tmp_ssp_.begin() + num_scored_segment_pairs, scored_segment_pair_comp()); ScoredSegmentPair* result_end = thrust::unique_copy(thrust::hip::par(allocator_).on(stream_), d_tmp_ssp_.begin(), d_tmp_ssp_.begin() + num_scored_segment_pairs, d_scored_segment_pairs + total_scored_segment_pairs_, scored_segment_pair_diagonal_overlap()); total_scored_segment_pairs_ += thrust::distance( d_scored_segment_pairs + total_scored_segment_pairs_, result_end); } } set_device_value_async(d_num_scored_segment_pairs, &total_scored_segment_pairs_, stream_); return StatusType::success; } StatusType UngappedXDrop::extend_async(const int8_t* h_query, const int32_t query_length, const int8_t* h_target, const int32_t target_length, const int32_t score_threshold, const std::vector<SeedPair>& h_seed_pairs) { // Reset the extender if it was used before in this mode reset(); // Set host pointer mode on host_ptr_api_mode_ = true; // Allocate space for query and target sequences d_query_ = device_buffer<int8_t>(query_length, allocator_, stream_); d_target_ = device_buffer<int8_t>(target_length, allocator_, stream_); // Allocate space for SeedPair input d_seed_pairs_ = device_buffer<SeedPair>(h_seed_pairs.size(), allocator_, stream_); // Allocate space for ScoredSegmentPair output d_ssp_ = device_buffer<ScoredSegmentPair>(h_seed_pairs.size(), allocator_, stream_); d_num_ssp_ = device_buffer<int32_t>(1, allocator_, stream_); // Async memcopy all the input values to device device_copy_n_async(h_query, query_length, d_query_.data(), stream_); device_copy_n_async(h_target, target_length, d_target_.data(), stream_); device_copy_n_async(h_seed_pairs.data(), h_seed_pairs.size(), d_seed_pairs_.data(), stream_); // Launch the ungapped extender device function return extend_async(d_query_.data(), query_length, d_target_.data(), target_length, score_threshold, d_seed_pairs_.data(), d_seed_pairs_.size(), d_ssp_.data(), d_num_ssp_.data()); } StatusType UngappedXDrop::sync() { if (host_ptr_api_mode_) { const int32_t h_num_ssp = get_value_from_device(d_num_ssp_.data(), stream_); if (h_num_ssp > 0) { h_ssp_.resize(h_num_ssp); device_copy_n_async(d_ssp_.data(), h_num_ssp, h_ssp_.data(), stream_); GW_CU_CHECK_ERR(hipStreamSynchronize(stream_)); } return StatusType::success; } // If this function was called without using the host pointer API, throw error return StatusType::invalid_operation; } const std::vector<ScoredSegmentPair>& UngappedXDrop::get_scored_segment_pairs() const { if (host_ptr_api_mode_) { return h_ssp_; } // If this function was called using the host pointer API, throw error throw std::runtime_error("Invalid API call. Getting scored segment pairs without calling extend_async host ptr API"); } void UngappedXDrop::reset() { // Reset these only if host pointer API was used earlier if (host_ptr_api_mode_) { h_ssp_.clear(); host_ptr_api_mode_ = false; } } UngappedXDrop::~UngappedXDrop() { UngappedXDrop::reset(); } } // namespace cudaextender } // namespace genomeworks } // namespace claraparabricks
d5a917c472f76d52a405f1eb9b5dc40b145401bf.cu
/* * Copyright 2020 NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * This algorithm was adapted from SegAlign's Ungapped Extender authored by * Sneha Goenka ([email protected]) and Yatish Turakhia ([email protected]). * Source code for original implementation and use in SegAlign can be found * here: https://github.com/gsneha26/SegAlign * Description of the algorithm and original implementation can be found in the SegAlign * paper published in SC20 (https://doi.ieeecomputersociety.org/10.1109/SC41405.2020.00043) */ #include "ungapped_xdrop.cuh" #include "ungapped_xdrop_kernels.cuh" #include <claraparabricks/genomeworks/utils/pinned_host_vector.hpp> #include <claraparabricks/genomeworks/utils/cudautils.hpp> #include <thrust/system/cuda/execution_policy.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <cub/device/device_scan.cuh> namespace claraparabricks { namespace genomeworks { namespace cudaextender { using namespace cudautils; UngappedXDrop::UngappedXDrop(const int32_t* h_score_mat, const int32_t score_mat_dim, const int32_t xdrop_threshold, const bool no_entropy, cudaStream_t stream, const int32_t device_id, DefaultDeviceAllocator allocator) : h_score_mat_(h_score_mat, h_score_mat + score_mat_dim) , score_mat_dim_(score_mat_dim) , xdrop_threshold_(xdrop_threshold) , no_entropy_(no_entropy) , stream_(stream) , device_id_(device_id) , host_ptr_api_mode_(false) , allocator_(allocator) { // Switch to device for copying over initial structures scoped_device_switch dev(device_id_); // Calculate the max limits on the number of extensions we can do on this GPU cudaDeviceProp device_prop; cudaGetDeviceProperties(&device_prop, device_id_); // TODO - Currently element and memory limits are artifacts of hardcoded global memory limits in // SegAlign. To be replaced with actual calculation of memory requirements with sizes of // datastructures taken into consideration. Also currently the max limit is based on total // global memory, which should be replaced with memory available from the passed in allocator. 
// Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/576 const int32_t max_ungapped_per_gb = 4194304; const float global_mem_gb = static_cast<float>(device_prop.totalGlobalMem) / 1073741824.0f; batch_max_ungapped_extensions_ = static_cast<int32_t>(global_mem_gb) * max_ungapped_per_gb; //Figure out memory requirements for cub functions size_t cub_storage_bytes = 0; GW_CU_CHECK_ERR(cub::DeviceScan::InclusiveSum(nullptr, cub_storage_bytes, d_done_.data(), d_done_.data(), batch_max_ungapped_extensions_, stream_)); // Allocate space on device for scoring matrix and intermediate results d_score_mat_ = device_buffer<int32_t>(score_mat_dim_, allocator_, stream_); d_done_ = device_buffer<int32_t>(batch_max_ungapped_extensions_, allocator_, stream_); d_tmp_ssp_ = device_buffer<ScoredSegmentPair>(batch_max_ungapped_extensions_, allocator_, stream_); d_temp_storage_cub_ = device_buffer<int8_t>(cub_storage_bytes, allocator_, stream_); device_copy_n_async(h_score_mat_.data(), score_mat_dim_, d_score_mat_.data(), stream_); } StatusType UngappedXDrop::extend_async(const int8_t* d_query, const int32_t query_length, const int8_t* d_target, const int32_t target_length, const int32_t score_threshold, const SeedPair* d_seed_pairs, const int32_t num_seed_pairs, ScoredSegmentPair* d_scored_segment_pairs, int32_t* d_num_scored_segment_pairs) { if (d_query == nullptr || d_target == nullptr || d_seed_pairs == nullptr) { GW_LOG_ERROR("Invalid input pointers"); return StatusType::invalid_input; } if (d_scored_segment_pairs == nullptr || d_num_scored_segment_pairs == nullptr) { GW_LOG_ERROR("Invalid output pointers"); return StatusType::invalid_input; } // Switch to configured GPU scoped_device_switch dev(device_id_); total_scored_segment_pairs_ = 0; for (int32_t seed_pair_start = 0; seed_pair_start < num_seed_pairs; seed_pair_start += batch_max_ungapped_extensions_) { // TODO - Kernel optimizations [Unnecessary memset?] // Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/579 GW_CU_CHECK_ERR(cudaMemsetAsync((void*)d_done_.data(), 0, batch_max_ungapped_extensions_ * sizeof(int32_t), stream_)); GW_CU_CHECK_ERR(cudaMemsetAsync((void*)d_tmp_ssp_.data(), 0, batch_max_ungapped_extensions_ * sizeof(ScoredSegmentPair), stream_)); const int32_t curr_num_pairs = std::min(batch_max_ungapped_extensions_, num_seed_pairs - seed_pair_start); find_high_scoring_segment_pairs<<<1024, 128, 0, stream_>>>(d_target, target_length, d_query, query_length, d_score_mat_.data(), no_entropy_, xdrop_threshold_, score_threshold, d_seed_pairs, curr_num_pairs, seed_pair_start, d_scored_segment_pairs, d_done_.data()); size_t cub_storage_bytes = d_temp_storage_cub_.size(); GW_CU_CHECK_ERR(cub::DeviceScan::InclusiveSum(d_temp_storage_cub_.data(), cub_storage_bytes, d_done_.data(), d_done_.data(), curr_num_pairs, stream_)) // TODO- Make output compression async. Currently synchronocity is arising due to // thrust::stable_sort. Dynamic parallelism or an equivalent sort with cub can be used // Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/578 const int32_t num_scored_segment_pairs = get_value_from_device(d_done_.data() + curr_num_pairs - 1, stream_); if (num_scored_segment_pairs > 0) { // TODO - Explore scaling up/down launch config based on workload. 
Also explore making // this accessible to the user for configuration // Github Issue: https://github.com/clara-parabricks/GenomeWorks/issues/577 compress_output<<<1024, 1024, 0, stream_>>>(d_done_.data(), seed_pair_start, d_scored_segment_pairs, d_tmp_ssp_.data(), curr_num_pairs); thrust::stable_sort(thrust::cuda::par(allocator_).on(stream_), d_tmp_ssp_.begin(), d_tmp_ssp_.begin() + num_scored_segment_pairs, scored_segment_pair_comp()); ScoredSegmentPair* result_end = thrust::unique_copy(thrust::cuda::par(allocator_).on(stream_), d_tmp_ssp_.begin(), d_tmp_ssp_.begin() + num_scored_segment_pairs, d_scored_segment_pairs + total_scored_segment_pairs_, scored_segment_pair_diagonal_overlap()); total_scored_segment_pairs_ += thrust::distance( d_scored_segment_pairs + total_scored_segment_pairs_, result_end); } } set_device_value_async(d_num_scored_segment_pairs, &total_scored_segment_pairs_, stream_); return StatusType::success; } StatusType UngappedXDrop::extend_async(const int8_t* h_query, const int32_t query_length, const int8_t* h_target, const int32_t target_length, const int32_t score_threshold, const std::vector<SeedPair>& h_seed_pairs) { // Reset the extender if it was used before in this mode reset(); // Set host pointer mode on host_ptr_api_mode_ = true; // Allocate space for query and target sequences d_query_ = device_buffer<int8_t>(query_length, allocator_, stream_); d_target_ = device_buffer<int8_t>(target_length, allocator_, stream_); // Allocate space for SeedPair input d_seed_pairs_ = device_buffer<SeedPair>(h_seed_pairs.size(), allocator_, stream_); // Allocate space for ScoredSegmentPair output d_ssp_ = device_buffer<ScoredSegmentPair>(h_seed_pairs.size(), allocator_, stream_); d_num_ssp_ = device_buffer<int32_t>(1, allocator_, stream_); // Async memcopy all the input values to device device_copy_n_async(h_query, query_length, d_query_.data(), stream_); device_copy_n_async(h_target, target_length, d_target_.data(), stream_); device_copy_n_async(h_seed_pairs.data(), h_seed_pairs.size(), d_seed_pairs_.data(), stream_); // Launch the ungapped extender device function return extend_async(d_query_.data(), query_length, d_target_.data(), target_length, score_threshold, d_seed_pairs_.data(), d_seed_pairs_.size(), d_ssp_.data(), d_num_ssp_.data()); } StatusType UngappedXDrop::sync() { if (host_ptr_api_mode_) { const int32_t h_num_ssp = get_value_from_device(d_num_ssp_.data(), stream_); if (h_num_ssp > 0) { h_ssp_.resize(h_num_ssp); device_copy_n_async(d_ssp_.data(), h_num_ssp, h_ssp_.data(), stream_); GW_CU_CHECK_ERR(cudaStreamSynchronize(stream_)); } return StatusType::success; } // If this function was called without using the host pointer API, throw error return StatusType::invalid_operation; } const std::vector<ScoredSegmentPair>& UngappedXDrop::get_scored_segment_pairs() const { if (host_ptr_api_mode_) { return h_ssp_; } // If this function was called using the host pointer API, throw error throw std::runtime_error("Invalid API call. Getting scored segment pairs without calling extend_async host ptr API"); } void UngappedXDrop::reset() { // Reset these only if host pointer API was used earlier if (host_ptr_api_mode_) { h_ssp_.clear(); host_ptr_api_mode_ = false; } } UngappedXDrop::~UngappedXDrop() { UngappedXDrop::reset(); } } // namespace cudaextender } // namespace genomeworks } // namespace claraparabricks
a0a42d92aa1428b94d7870f1f306dae4740326d2.hip
// !!! This is a file automatically generated by hipify!!! //xfail:data-race // Write by thread 0 // Write by thread 1 // x = 1 #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define N 2//512 __global__ void example(float * A, int x) { //__requires(x = 1); // x deve ser a diferena entre o limite do if1 e do if2 if(threadIdx.x == 0) { A[threadIdx.x + x] = threadIdx.x; //A[1] = 0; } if(threadIdx.x == 1) { A[threadIdx.x] = threadIdx.x; //A[1] = 1; } } int main() { int c=1; float *a; float *dev_a; a = (float*)malloc(N*sizeof(float)); hipMalloc((void**)&dev_a, N*sizeof(float)); hipMemcpy(dev_a, a, N*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( example), dim3(1), dim3(N), 0, 0, dev_a, c); //ESBMC_verify_kernel_fuintint(example,1, N,dev_a, c); hipMemcpy(a, dev_a, N*sizeof(float), hipMemcpyDeviceToHost); assert(a[1] == 0 || a[1] == 1); free(a); hipFree(dev_a); return 0; }
a0a42d92aa1428b94d7870f1f306dae4740326d2.cu
//xfail:data-race // Write by thread 0 // Write by thread 1 // x = 1 #include <stdio.h> #include <stdlib.h> #include <cuda.h> #define N 2//512 __global__ void example(float * A, int x) { //__requires(x = 1); // x deve ser a diferença entre o limite do if1 e do if2 if(threadIdx.x == 0) { A[threadIdx.x + x] = threadIdx.x; //A[1] = 0; } if(threadIdx.x == 1) { A[threadIdx.x] = threadIdx.x; //A[1] = 1; } } int main() { int c=1; float *a; float *dev_a; a = (float*)malloc(N*sizeof(float)); cudaMalloc((void**)&dev_a, N*sizeof(float)); cudaMemcpy(dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice); example<<<1, N>>>(dev_a, c); //ESBMC_verify_kernel_fuintint(example,1, N,dev_a, c); cudaMemcpy(a, dev_a, N*sizeof(float), cudaMemcpyDeviceToHost); assert(a[1] == 0 || a[1] == 1); free(a); cudaFree(dev_a); return 0; }
a7a4bc20eeee60b15ed1bf2192daaa28e7823a9e.hip
// !!! This is a file automatically generated by hipify!!! /* * ============================================================================ * * Authors: * Hunter McCoy <[email protected] * * * About: * This file contains k-mer speed tests for several Hash Table Types * built using POGGERS. For more verbose testing please see the * benchmarks folder. * * ============================================================================ */ //#include "include/templated_quad_table.cuh" #include <poggers/metadata.cuh> #include <poggers/hash_schemes/murmurhash.cuh> #include <poggers/probing_schemes/linear_probing.cuh> #include <poggers/probing_schemes/double_hashing.cuh> #include <poggers/insert_schemes/power_of_n_shortcut.cuh> #include <poggers/insert_schemes/single_slot_insert.cuh> #include <poggers/insert_schemes/bucket_insert.cuh> #include <poggers/insert_schemes/power_of_n.cuh> #include <poggers/representations/key_val_pair.cuh> #include <poggers/representations/shortened_key_val_pair.cuh> #include <poggers/probing_schemes/xor_power_of_two.cuh> #include <poggers/representations/dynamic_container.cuh> #include <poggers/representations/key_only.cuh> #include <poggers/sizing/default_sizing.cuh> #include <poggers/sizing/variadic_sizing.cuh> #include <poggers/tables/base_table.cuh> #include <poggers/metadata.cuh> #include <poggers/hash_schemes/murmurhash.cuh> #include <poggers/probing_schemes/linear_probing.cuh> #include <poggers/probing_schemes/double_hashing.cuh> #include <poggers/probing_schemes/power_of_two.cuh> #include <poggers/insert_schemes/single_slot_insert.cuh> #include <poggers/insert_schemes/bucket_insert.cuh> #include <poggers/insert_schemes/power_of_n.cuh> #include <poggers/representations/key_val_pair.cuh> #include <poggers/representations/shortened_key_val_pair.cuh> #include <poggers/sizing/default_sizing.cuh> #include <poggers/tables/base_table.cuh> #include <poggers/insert_schemes/power_of_n_shortcut.cuh> #include <poggers/sizing/variadic_sizing.cuh> #include <poggers/representations/soa.cuh> #include <poggers/insert_schemes/power_of_n_shortcut_buckets.cuh> #include <poggers/tables/bucketed_table.cuh> #include <poggers/metadata.cuh> #include <poggers/hash_schemes/murmurhash.cuh> #include <poggers/probing_schemes/double_hashing.cuh> #include <poggers/probing_schemes/power_of_two.cuh> // new container for 2-byte key val pairs #include <poggers/representations/grouped_key_val_pair.cuh> #include <poggers/representations/key_val_pair.cuh> #include <poggers/representations/dynamic_container.cuh> #include <poggers/sizing/default_sizing.cuh> #include <poggers/insert_schemes/power_of_n_shortcut.cuh> #include <poggers/insert_schemes/power_of_n_shortcut_buckets.cuh> #include <poggers/representations/packed_bucket.cuh> #include <poggers/insert_schemes/linear_insert_buckets.cuh> #include <poggers/tables/bucketed_table.cuh> #include <poggers/representations/grouped_storage_sub_bits.cuh> #include <poggers/probing_schemes/xor_power_of_two.cuh> #include <stdio.h> #include <iostream> #include <chrono> #include <openssl/rand.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <inttypes.h> #include <time.h> #include <sys/time.h> #include <sys/types.h> #include <sys/mman.h> #include <unistd.h> #include <random> #include <assert.h> #include <chrono> #include <iostream> #include <fstream> #include <string> #include <algorithm> #include <bitset> #include <warpcore/bloom_filter.cuh> // using 
tiny_static_table_4 = poggers::tables::static_table<uint64_t, uint16_t, poggers::representations::dynamic_container<poggers::representations::key_container,uint16_t>::representation, 4, 4, poggers::insert_schemes::bucket_insert, 20, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher>; // using tcf = poggers::tables::static_table<uint64_t,uint16_t, poggers::representations::dynamic_container<poggers::representations::key_container,uint16_t>::representation, 4, 16, poggers::insert_schemes::power_of_n_insert_shortcut_scheme, 2, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher, true, tiny_static_table_4>; //Same table but with value support //swap out key-only container with key-val pair - dynamic container automatically compresses keys/vals into one uint32 // using tiny_static_table_4 = poggers::tables::static_table<uint64_t, uint16_t, poggers::representations::dynamic_container<poggers::representations::key_val_pair,uint16_t>::representation, 4, 4, poggers::insert_schemes::bucket_insert, 20, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher>; // using tcf = poggers::tables::static_table<uint64_t,uint16_t, poggers::representations::dynamic_container<poggers::representations::key_val_pair,uint16_t>::representation, 4, 16, poggers::insert_schemes::power_of_n_insert_shortcut_scheme, 2, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher, true, tiny_static_table_4>; using del_backing_table = poggers::tables::bucketed_table< uint64_t, uint16_t, poggers::representations::dynamic_bucket_container<poggers::representations::dynamic_container< poggers::representations::bit_grouped_container<16, 16>::representation, uint16_t>::representation>::representation, 4, 8, poggers::insert_schemes::linear_insert_bucket_scheme, 400, poggers::probing_schemes::linearProber, poggers::hashers::murmurHasher>; using tcf = poggers::tables::bucketed_table< uint64_t, uint16_t, poggers::representations::dynamic_bucket_container<poggers::representations::dynamic_container< poggers::representations::bit_grouped_container<16, 16>::representation, uint16_t>::representation>::representation, 4, 16, poggers::insert_schemes::power_of_n_insert_shortcut_bucket_scheme, 2, poggers::probing_schemes::XORPowerOfTwoHasher, poggers::hashers::murmurHasher, true, del_backing_table>; //using tcqf_no_back = poggers::tables::static_table<uint64_t,uint16_t, poggers::representations::dynamic_container<poggers::representations::key_container,uint16_t>::representation, 4, 16, poggers::insert_schemes::power_of_n_insert_shortcut_scheme, 2, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher>; using warpcore_bloom = warpcore::BloomFilter<uint64_t>; #define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } uint64_t num_slots_per_p2(uint64_t nitems){ //uint64_t nitems = .9*(1ULL << nbits); //for p=1/100, this is the correct value uint64_t nslots = 959*nitems/100; printf("using %llu slots\n", nslots); return nslots; } template <typename T> __host__ T * generate_data(uint64_t nitems){ //malloc space T * vals = (T *) malloc(nitems * sizeof(T)); // 100,000,000 uint64_t cap = 100000000ULL; for (uint64_t to_fill = 0; to_fill < nitems; to_fill+=0){ uint64_t togen = (nitems - to_fill > cap) ? 
cap : nitems - to_fill; RAND_bytes((unsigned char *) (vals + to_fill), togen * sizeof(T)); to_fill += togen; //printf("Generated %llu/%llu\n", to_fill, nitems); } return vals; } template <typename Filter, typename Key, typename Val> __global__ void find_first_fill(Filter * filter, Key * keys, Val * vals, uint64_t nitems, uint64_t * returned_nitems){ auto tile = filter->get_my_tile(); uint64_t tid = tile.meta_group_size()*blockIdx.x + tile.meta_group_rank(); if (tid != 0) return; // if (tile.thread_rank() == 0){ // for (int i = 0; i < 10; i++){ // printf("%d: %llu, %llu\n", i, keys[i], vals[i]); // } // } //printf("Starting!\n"); for (uint64_t i = 0; i < nitems; i++){ if (!filter->insert(tile, keys[i])){ if (tile.thread_rank() == 0){ printf("Inserted %llu / %llu, %f full\n", i, nitems, 1.0*i/nitems); } returned_nitems[0] = i; return; } else { Val alt_val = 0; assert(filter->query(tile, keys[i], alt_val)); assert(alt_val == vals[i]); } } if (tile.thread_rank() == 0) printf("All %llu items inserted\n", nitems); } template <typename Filter, typename Key, typename Val> __global__ void speed_insert_kernel(Filter * filter, Key * keys, Val * vals, uint64_t nvals, uint64_t * misses){ auto tile = filter->get_my_tile(); uint64_t tid = tile.meta_group_size()*blockIdx.x + tile.meta_group_rank(); if (tid >= nvals) return; if (!filter->insert(tile, keys[tid], vals[tid])){ if (tile.thread_rank() == 0) atomicAdd((unsigned long long int *) misses, 1ULL); } else{ Val test_val = 0; assert(filter->query(tile, keys[tid], test_val)); assert(test_val == vals[tid]); } //assert(filter->insert(tile, keys[tid], vals[tid])); } template <typename Filter, typename Key, typename Val> __global__ void speed_remove_kernel(Filter * filter, Key * keys, uint64_t nvals, uint64_t * misses){ auto tile = filter->get_my_tile(); uint64_t tid = tile.meta_group_size()*blockIdx.x + tile.meta_group_rank(); if (tid >= nvals) return; if (!filter->remove(tile, keys[tid]) && tile.thread_rank() == 0){ atomicAdd((unsigned long long int *) misses, 1ULL); } //else{ // Val test_val = 0; // assert(filter->query(tile, keys[tid], test_val)); // } //assert(filter->insert(tile, keys[tid], vals[tid])); } __global__ void count_bf_misses(bool * vals, uint64_t nitems, uint64_t * misses){ uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x; if (tid >= nitems) return; if (!vals[tid]){ atomicAdd((unsigned long long int *) misses, 1ULL); } } template <typename Filter, typename Key, typename Val> __global__ void speed_query_kernel(Filter * filter, Key * keys, Val * vals, uint64_t nvals, uint64_t * query_misses, uint64_t * query_failures){ auto tile = filter->get_my_tile(); uint64_t tid = tile.meta_group_size()*blockIdx.x + tile.meta_group_rank(); if (tid >= nvals) return; Val test_val = 0; if (!filter->query(tile,keys[tid], test_val)){ if(tile.thread_rank() == 0) atomicAdd((unsigned long long int *) query_misses, 1ULL); } else { if (test_val != vals[tid] && tile.thread_rank() == 0){ atomicAdd((unsigned long long int *) query_failures, 1ULL); } } //assert(filter->query(tile, keys[tid], val)); } __host__ void test_tcf_speed(const std::string& filename, int num_bits, int num_batches, bool first_file){ using Key = uint64_t; using Val = uint16_t; using Filter = tcf; std::cout << "Starting " << filename << " " << num_bits << std::endl; // poggers::sizing::size_in_num_slots<2> pre_init ((1ULL << num_bits), (1ULL << num_bits)/100); // poggers::sizing::size_in_num_slots<2> * Initializer = &pre_init; poggers::sizing::size_in_num_slots<2> pre_init ((1ULL << num_bits), 
(1ULL << num_bits)/100); poggers::sizing::size_in_num_slots<2> * Initializer = &pre_init; uint64_t nitems = Initializer->total()*.9; Key * host_keys = generate_data<Key>(nitems); Val * host_vals = generate_data<Val>(nitems); Key * fp_keys = generate_data<Key>(nitems); Key * dev_keys; Val * dev_vals; uint64_t * misses; hipMallocManaged((void **)& misses, sizeof(uint64_t)*6); hipDeviceSynchronize(); printf("Data generated\n"); misses[0] = 0; misses[1] = 0; misses[2] = 0; misses[3] = 0; misses[4] = 0; misses[5] = 0; //static seed for testing Filter * test_filter = Filter::generate_on_device(Initializer, 42); hipDeviceSynchronize(); //init timing materials std::chrono::duration<double> * insert_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * query_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * fp_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * delete_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); uint64_t * batch_amount = (uint64_t *) malloc(num_batches*sizeof(uint64_t)); //print_tid_kernel<Filter, Key, Val><<<test_filter->get_num_blocks(nitems),test_filter->get_block_size(nitems)>>>(test_filter, dev_keys, dev_vals, nitems); for (uint64_t i = 0; i < num_batches; i++){ uint64_t start_of_batch = i*nitems/num_batches; uint64_t items_in_this_batch = (i+1)*nitems/num_batches; if (items_in_this_batch > nitems) items_in_this_batch = nitems; items_in_this_batch = items_in_this_batch - start_of_batch; batch_amount[i] = items_in_this_batch; hipMalloc((void **)& dev_keys, items_in_this_batch*sizeof(Key)); hipMalloc((void **)& dev_vals, items_in_this_batch*sizeof(Val)); hipMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), hipMemcpyHostToDevice); hipMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), hipMemcpyHostToDevice); //ensure GPU is caught up for next task hipDeviceSynchronize(); auto insert_start = std::chrono::high_resolution_clock::now(); //add function for configure parameters - should be called by ht and return dim3 hipLaunchKernelGGL(( speed_insert_kernel<Filter, Key, Val>), dim3(test_filter->get_num_blocks(items_in_this_batch)),dim3(test_filter->get_block_size(items_in_this_batch)), 0, 0, test_filter, dev_keys, dev_vals, items_in_this_batch, misses); hipDeviceSynchronize(); auto insert_end = std::chrono::high_resolution_clock::now(); insert_diff[i] = insert_end-insert_start; hipMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), hipMemcpyHostToDevice); hipMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), hipMemcpyHostToDevice); hipDeviceSynchronize(); auto query_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( speed_query_kernel<Filter, Key, Val>), dim3(test_filter->get_num_blocks(items_in_this_batch)),dim3(test_filter->get_block_size(items_in_this_batch)), 0, 0, test_filter, dev_keys, dev_vals, items_in_this_batch, &misses[1], &misses[2]); hipDeviceSynchronize(); auto query_end = std::chrono::high_resolution_clock::now(); query_diff[i] = query_end - query_start; hipMemcpy(dev_keys, fp_keys+start_of_batch, items_in_this_batch*sizeof(Key), hipMemcpyHostToDevice); hipMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), hipMemcpyHostToDevice); 
hipDeviceSynchronize(); auto fp_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( speed_query_kernel<Filter, Key, Val>), dim3(test_filter->get_num_blocks(items_in_this_batch)),dim3(test_filter->get_block_size(items_in_this_batch)), 0, 0, test_filter, dev_keys, dev_vals, items_in_this_batch, &misses[3], &misses[4]); hipDeviceSynchronize(); auto fp_end = std::chrono::high_resolution_clock::now(); fp_diff[i] = fp_end-fp_start; hipFree(dev_keys); hipFree(dev_vals); } //deletes for (uint64_t i = 0; i < num_batches; i++){ uint64_t start_of_batch = i*nitems/num_batches; uint64_t items_in_this_batch = (i+1)*nitems/num_batches; if (items_in_this_batch > nitems) items_in_this_batch = nitems; items_in_this_batch = items_in_this_batch - start_of_batch; // batch_amount[i] = items_in_this_batch; hipMalloc((void **)& dev_keys, items_in_this_batch*sizeof(Key)); //hipMalloc((void **)& dev_vals, items_in_this_batch*sizeof(Val)); hipMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), hipMemcpyHostToDevice); //hipMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), hipMemcpyHostToDevice); hipDeviceSynchronize(); auto delete_start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( speed_remove_kernel<Filter, Key, Val>), dim3(test_filter->get_num_blocks(items_in_this_batch)),dim3(test_filter->get_block_size(items_in_this_batch)), 0, 0, test_filter, dev_keys, items_in_this_batch, &misses[5]); hipDeviceSynchronize(); auto delete_end = std::chrono::high_resolution_clock::now(); delete_diff[i] = delete_end - delete_start; } hipDeviceSynchronize(); Filter::free_on_device(test_filter); free(host_keys); free(host_vals); free(fp_keys); //free pieces //time to output printf("nbits: %d nitems: %llu, inserts: %llu, query missed: %llu, query wrong %llu, fp missed %llu, fp wrong %llu, %llu deletes missed\n", num_bits, nitems, misses[0], misses[1], misses[2], misses[3], misses[4], misses[5]); std::chrono::duration<double> summed_insert_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_insert_diff += insert_diff[i]; } std::chrono::duration<double> summed_query_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_query_diff += query_diff[i]; } std::chrono::duration<double> summed_fp_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_fp_diff += fp_diff[i]; } std::chrono::duration<double> summed_delete_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_delete_diff += delete_diff[i]; } std::string insert_file = filename + "_" + std::to_string(num_bits) + "_insert.txt"; std::string query_file = filename + "_" + std::to_string(num_bits) + "_lookup.txt"; std::string fp_file = filename + "_" + std::to_string(num_bits) + "_fp.txt"; //agg files std::string agg_insert_file = filename + "_aggregate_inserts.txt"; std::string agg_lookup_file = filename + "_aggregate_lookup.txt"; std::string agg_fp_file = filename + "_aggregate_fp.txt"; std::string del_file = filename + "_aggregate_deletes.txt"; // std::cout << insert_file << std::endl; FILE *fp_insert = fopen(insert_file.c_str(), "w"); FILE *fp_lookup = fopen(query_file.c_str(), "w"); FILE *fp_false_lookup = fopen(fp_file.c_str(), "w"); FILE *fp_agg_insert; FILE *fp_agg_lookup; FILE *fp_agg_fp; FILE *fp_agg_del; if (first_file){ fp_agg_insert = fopen(agg_insert_file.c_str(), "w"); fp_agg_lookup = fopen(agg_lookup_file.c_str(), "w"); fp_agg_fp = fopen(agg_fp_file.c_str(), 
"w"); fp_agg_del = fopen(del_file.c_str(), "w"); } else { fp_agg_insert = fopen(agg_insert_file.c_str(), "a"); fp_agg_lookup = fopen(agg_lookup_file.c_str(), "a"); fp_agg_fp = fopen(agg_fp_file.c_str(), "a"); fp_agg_del = fopen(del_file.c_str(), "a"); } if (fp_insert == NULL) { printf("Can't open the data file %s\n", insert_file); exit(1); } if (fp_lookup == NULL ) { printf("Can't open the data file %s\n", query_file); exit(1); } if (fp_false_lookup == NULL) { printf("Can't open the data file %s\n", fp_file); exit(1); } if (fp_agg_insert == NULL || fp_agg_lookup == NULL || fp_agg_fp == NULL) { printf("Can't open the aggregate files for %s-%d\n", filename, num_bits); exit(1); } if (fp_agg_del == NULL){ std::cout << "Cant open " << del_file << std::endl; } //inserts //const uint64_t scaling_factor = 1ULL; const uint64_t scaling_factor = 1000000ULL; std::cout << "Writing results to file: " << insert_file << std::endl; fprintf(fp_insert, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_insert, "%d", i*100/num_batches); fprintf(fp_insert, " %f\n", batch_amount[i]/(scaling_factor*insert_diff[i].count())); } //queries std::cout << "Writing results to file: " << query_file << std::endl; fprintf(fp_lookup, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_lookup, "%d", i*100/num_batches); fprintf(fp_lookup, " %f\n", batch_amount[i]/(scaling_factor*query_diff[i].count())); } std::cout << "Writing results to file: " << fp_file << std::endl; fprintf(fp_false_lookup, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_false_lookup, "%d", i*100/num_batches); fprintf(fp_false_lookup, " %f\n", batch_amount[i]/(scaling_factor*fp_diff[i].count())); } if (first_file){ fprintf(fp_agg_insert, "x_0 y_0\n"); fprintf(fp_agg_lookup, "x_0 y_0\n"); fprintf(fp_agg_fp, "x_0 y_0\n"); fprintf(fp_agg_del, "x_0 y_0\n"); } // fprintf(fp_agg_insert, "%d %f\n", num_bits, nitems/(1000ULL*summed_insert_diff.count())); // fprintf(fp_agg_lookup, "%d %f\n", num_bits, nitems/(1000ULL*summed_query_diff.count())); // fprintf(fp_agg_fp, "%d %f\n", num_bits, nitems/(1000ULL*summed_fp_diff.count())); fprintf(fp_agg_insert, "%d %f\n", num_bits, nitems/(scaling_factor*summed_insert_diff.count())); fprintf(fp_agg_lookup, "%d %f\n", num_bits, nitems/(scaling_factor*summed_query_diff.count())); fprintf(fp_agg_fp, "%d %f\n", num_bits, nitems/(scaling_factor*summed_fp_diff.count())); fprintf(fp_agg_del, "%d %f\n", num_bits, nitems/(scaling_factor*summed_delete_diff.count())); fclose(fp_insert); fclose(fp_lookup); fclose(fp_false_lookup); fclose(fp_agg_insert); fclose(fp_agg_lookup); fclose(fp_agg_fp); fclose(fp_agg_del); return; } template <typename Filter, typename Key, typename Val> __host__ void tcqf_find_first_fill(uint64_t num_bits){ //std::cout << "Starting " << filename << " " << num_bits << std::endl; poggers::sizing::size_in_num_slots<1> pre_init ((1ULL << num_bits)); poggers::sizing::size_in_num_slots<1> * Initializer = &pre_init; // poggers::sizing::size_in_num_slots<2> pre_init ((1ULL << num_bits), (1ULL << num_bits)/100); // poggers::sizing::size_in_num_slots<2> * Initializer = &pre_init; // poggers::sizing::size_in_num_slots<1> pre_init ((1ULL << num_bits)); // poggers::sizing::size_in_num_slots<1> * Initializer = &pre_init; uint64_t nitems = Initializer->total(); Key * host_keys = generate_data<Key>(nitems); Val * host_vals = generate_data<Val>(nitems); Key * dev_keys; Val * dev_vals; // printf("Host keys\n"); // for (int i = 0; i < 10; i++){ // printf("%d: %llu, %llu\n", i, 
host_keys[i], host_vals[i]); // } uint64_t * misses; hipMallocManaged((void ** )&misses, sizeof(uint64_t)*2); misses[0] = 0; misses[1] = 0; uint64_t * returned_nitems; hipMallocManaged((void **)&returned_nitems, sizeof(uint64_t)); returned_nitems[0] = 0; hipMalloc((void **)&dev_keys, sizeof(Key)*nitems); hipMalloc((void **)&dev_vals, sizeof(Val)*nitems); hipMemcpy(dev_keys, host_keys, sizeof(Key)*nitems, hipMemcpyHostToDevice); hipMemcpy(dev_vals, host_vals, sizeof(Val)*nitems, hipMemcpyHostToDevice); Filter * test_filter = Filter::generate_on_device(Initializer, 42); printf("Test size: %llu\n", num_bits); hipDeviceSynchronize(); hipLaunchKernelGGL(( find_first_fill<Filter, Key, Val>), dim3(1), dim3(32), 0, 0, test_filter, dev_keys, dev_vals, nitems, returned_nitems); hipDeviceSynchronize(); printf("Returned %llu\n", returned_nitems[0]); hipMemcpy(dev_keys, host_keys, sizeof(Key)*nitems, hipMemcpyHostToDevice); hipMemcpy(dev_vals, host_vals, sizeof(Val)*nitems, hipMemcpyHostToDevice); hipDeviceSynchronize(); uint64_t new_nitems = returned_nitems[0]; hipLaunchKernelGGL(( speed_query_kernel<Filter, Key, Val>), dim3(test_filter->get_num_blocks(new_nitems)), dim3(test_filter->get_block_size(new_nitems)), 0, 0, test_filter, dev_keys, dev_vals, new_nitems, &misses[0], &misses[1]); hipDeviceSynchronize(); printf("Final misses: initial misses %llu %f wrong values %llu %f\n", misses[0], 1.0*misses[0]/new_nitems, misses[1], 1.0*misses[1]/new_nitems); hipDeviceSynchronize(); hipFree(misses); hipFree(returned_nitems); Filter::free_on_device(test_filter); hipFree(dev_keys); hipFree(dev_vals); } __host__ void test_bloom_speed(const std::string& filename, int num_bits, int num_batches, bool first_file){ using Key = uint64_t; using Val = bool; //using Filter = tcqf; std::cout << "Starting " << filename << " " << num_bits << std::endl; uint64_t nitems = (1ULL << num_bits)*.9; printf("Starting filter with %llu items to inserts\n", nitems); Key * host_keys = generate_data<Key>(nitems); //Val * host_vals = generate_data<Val>(nitems); Key * fp_keys = generate_data<Key>(nitems); Key * dev_keys; Val * dev_vals; uint64_t * misses; hipMallocManaged((void **)& misses, sizeof(uint64_t)*5); hipDeviceSynchronize(); printf("Data generated\n"); misses[0] = 0; misses[1] = 0; misses[2] = 0; misses[3] = 0; misses[4] = 0; //static seed for testing warpcore_bloom bloom_filter(num_slots_per_p2(nitems), 7); hipDeviceSynchronize(); //init timing materials std::chrono::duration<double> * insert_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * query_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * fp_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); uint64_t * batch_amount = (uint64_t *) malloc(num_batches*sizeof(uint64_t)); //print_tid_kernel<Filter, Key, Val><<<test_filter->get_num_blocks(nitems),test_filter->get_block_size(nitems)>>>(test_filter, dev_keys, dev_vals, nitems); for (uint64_t i = 0; i < num_batches; i++){ uint64_t start_of_batch = i*nitems/num_batches; uint64_t items_in_this_batch = (i+1)*nitems/num_batches; if (items_in_this_batch > nitems) items_in_this_batch = nitems; items_in_this_batch = items_in_this_batch - start_of_batch; batch_amount[i] = items_in_this_batch; hipMalloc((void **)& dev_keys, items_in_this_batch*sizeof(Key)); hipMalloc((void **)& dev_vals, items_in_this_batch*sizeof(Val)); 
hipMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), hipMemcpyHostToDevice); //hipMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), hipMemcpyHostToDevice); //ensure GPU is caught up for next task hipDeviceSynchronize(); auto insert_start = std::chrono::high_resolution_clock::now(); //add function for configure parameters - should be called by ht and return dim3 bloom_filter.insert(dev_keys, items_in_this_batch); hipDeviceSynchronize(); auto insert_end = std::chrono::high_resolution_clock::now(); insert_diff[i] = insert_end-insert_start; hipMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), hipMemcpyHostToDevice); //hipMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), hipMemcpyHostToDevice); hipDeviceSynchronize(); auto query_start = std::chrono::high_resolution_clock::now(); bloom_filter.retrieve(dev_keys, items_in_this_batch, dev_vals); hipDeviceSynchronize(); auto query_end = std::chrono::high_resolution_clock::now(); //return; query_diff[i] = query_end - query_start; hipLaunchKernelGGL(( count_bf_misses), dim3((items_in_this_batch-1)/1024 +1), dim3(1024), 0, 0, dev_vals, items_in_this_batch, &misses[0]); hipMemcpy(dev_keys, fp_keys+start_of_batch, items_in_this_batch*sizeof(Key), hipMemcpyHostToDevice); //hipMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), hipMemcpyHostToDevice); hipDeviceSynchronize(); auto fp_start = std::chrono::high_resolution_clock::now(); bloom_filter.retrieve(dev_keys, items_in_this_batch, dev_vals); hipDeviceSynchronize(); auto fp_end = std::chrono::high_resolution_clock::now(); fp_diff[i] = fp_end-fp_start; hipLaunchKernelGGL(( count_bf_misses), dim3((items_in_this_batch-1)/1024 +1), dim3(1024), 0, 0, dev_vals, items_in_this_batch, &misses[1]); hipDeviceSynchronize(); hipFree(dev_keys); hipFree(dev_vals); } hipDeviceSynchronize(); //Filter::free_on_device(test_filter); free(host_keys); //free(host_vals); free(fp_keys); //free pieces //time to output printf("%llu %llu %f %llu %f %f \n", nitems, misses[0], 1.0*(misses[0])/nitems, misses[1], 1.0*misses[1]/nitems, 1.0 - 1.0*misses[1]/nitems); std::chrono::duration<double> summed_insert_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_insert_diff += insert_diff[i]; } std::chrono::duration<double> summed_query_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_query_diff += query_diff[i]; } std::chrono::duration<double> summed_fp_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_fp_diff += fp_diff[i]; } std::string insert_file = filename + "_" + std::to_string(num_bits) + "_insert.txt"; std::string query_file = filename + "_" + std::to_string(num_bits) + "_lookup.txt"; std::string fp_file = filename + "_" + std::to_string(num_bits) + "_fp.txt"; //agg files std::string agg_insert_file = filename + "_aggregate_inserts.txt"; std::string agg_lookup_file = filename + "_aggregate_lookup.txt"; std::string agg_fp_file = filename + "_aggregate_fp.txt"; // std::cout << insert_file << std::endl; FILE *fp_insert = fopen(insert_file.c_str(), "w"); FILE *fp_lookup = fopen(query_file.c_str(), "w"); FILE *fp_false_lookup = fopen(fp_file.c_str(), "w"); FILE *fp_agg_insert; FILE *fp_agg_lookup; FILE *fp_agg_fp; if (first_file){ fp_agg_insert = fopen(agg_insert_file.c_str(), "w"); fp_agg_lookup = fopen(agg_lookup_file.c_str(), "w"); fp_agg_fp = fopen(agg_fp_file.c_str(), "w"); } else { fp_agg_insert = 
fopen(agg_insert_file.c_str(), "a"); fp_agg_lookup = fopen(agg_lookup_file.c_str(), "a"); fp_agg_fp = fopen(agg_fp_file.c_str(), "a"); } if (fp_insert == NULL) { printf("Can't open the data file %s\n", insert_file); exit(1); } if (fp_lookup == NULL ) { printf("Can't open the data file %s\n", query_file); exit(1); } if (fp_false_lookup == NULL) { printf("Can't open the data file %s\n", fp_file); exit(1); } if (fp_agg_insert == NULL || fp_agg_lookup == NULL || fp_agg_fp == NULL) { printf("Can't open the aggregate files for %s-%d\n", filename, num_bits); exit(1); } //inserts //const uint64_t scaling_factor = 1ULL; const uint64_t scaling_factor = 1000000ULL; std::cout << "Writing results to file: " << insert_file << std::endl; fprintf(fp_insert, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_insert, "%d", i*100/num_batches); fprintf(fp_insert, " %f\n", batch_amount[i]/(scaling_factor*insert_diff[i].count())); } //queries std::cout << "Writing results to file: " << query_file << std::endl; fprintf(fp_lookup, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_lookup, "%d", i*100/num_batches); fprintf(fp_lookup, " %f\n", batch_amount[i]/(scaling_factor*query_diff[i].count())); } std::cout << "Writing results to file: " << fp_file << std::endl; fprintf(fp_false_lookup, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_false_lookup, "%d", i*100/num_batches); fprintf(fp_false_lookup, " %f\n", batch_amount[i]/(scaling_factor*fp_diff[i].count())); } if (first_file){ fprintf(fp_agg_insert, "x_0 y_0\n"); fprintf(fp_agg_lookup, "x_0 y_0\n"); fprintf(fp_agg_fp, "x_0 y_0\n"); } // fprintf(fp_agg_insert, "%d %f\n", num_bits, nitems/(1000ULL*summed_insert_diff.count())); // fprintf(fp_agg_lookup, "%d %f\n", num_bits, nitems/(1000ULL*summed_query_diff.count())); // fprintf(fp_agg_fp, "%d %f\n", num_bits, nitems/(1000ULL*summed_fp_diff.count())); fprintf(fp_agg_insert, "%d %f\n", num_bits, nitems/(scaling_factor*summed_insert_diff.count())); fprintf(fp_agg_lookup, "%d %f\n", num_bits, nitems/(scaling_factor*summed_query_diff.count())); fprintf(fp_agg_fp, "%d %f\n", num_bits, nitems/(scaling_factor*summed_fp_diff.count())); fclose(fp_insert); fclose(fp_lookup); fclose(fp_false_lookup); fclose(fp_agg_insert); fclose(fp_agg_lookup); fclose(fp_agg_fp); return; } __host__ void test_first_fail(uint64_t num_bits){ //tcqf_find_first_fill<tcqf, uint16_t, uint16_t>(num_bits); //tcqf_find_first_fill<tcqf_no_back, uint64_t, uint16_t>(num_bits); } int main(int argc, char** argv) { // poggers::sizing::size_in_num_slots<1> first_size_20(1ULL << 20); // printf("2^20\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_20); // poggers::sizing::size_in_num_slots<1> first_size_22(1ULL << 22); // printf("2^22\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_22); // poggers::sizing::size_in_num_slots<1> first_size_24(1ULL << 24); // printf("2^24\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_24); // poggers::sizing::size_in_num_slots<1> first_size_26(1ULL << 26); // printf("2^26\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_26); // poggers::sizing::size_in_num_slots<1> first_size_28(1ULL << 28); // printf("2^28\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_28); // printf("alt table\n"); // poggers::sizing::size_in_num_slots<1>half_split_20(6000); // test_speed<p2_table, key_type, val_type>(&half_split_20); // test_speed<small_double_type, uint64_t, uint64_t>(&half_split_22); // 
poggers::sizing::size_in_num_slots<2>half_split_24(1ULL << 23, 1ULL << 23); // test_speed<small_double_type, uint64_t, uint64_t>(&half_split_24); // poggers::sizing::size_in_num_slots<2>half_split_26(1ULL << 25, 1ULL << 25); // test_speed<small_double_type, uint64_t, uint64_t>(&half_split_26); // printf("P2 tiny table\n"); // poggers::sizing::size_in_num_slots<1>half_split_28(1ULL << 28); // test_speed<p2_table, key_type, val_type>(&half_split_28); // poggers::sizing::variadic_size size(100000,100); // tcqf * test_tcqf = tcqf::generate_on_device(&size, 42); // hipDeviceSynchronize(); // tcqf::free_on_device(test_tcqf); // warpcore_bloom my_filter((1ULL << 20), 7); // test_bloom_speed("bloom_results/test", 20, 20, true); test_bloom_speed("bloom_results/test", 22, 20, true); test_bloom_speed("bloom_results/test", 24, 20, false); test_bloom_speed("bloom_results/test", 26, 20, false); test_bloom_speed("bloom_results/test", 28, 20, false); test_bloom_speed("bloom_results/test", 30, 20, false); // test_tcf_speed("results/test", 20, 20, true); test_tcf_speed("results/test", 22, 20, true); test_tcf_speed("results/test", 24, 20, false); test_tcf_speed("results/test", 26, 20, false); test_tcf_speed("results/test", 28, 20, false); test_tcf_speed("results/test", 30, 20, false); // test_first_fail(22); // test_first_fail(24); // test_first_fail(26); // test_first_fail(28); // test_first_fail(30); return 0; }
a7a4bc20eeee60b15ed1bf2192daaa28e7823a9e.cu
/* * ============================================================================ * * Authors: * Hunter McCoy <[email protected] * * * About: * This file contains k-mer speed tests for several Hash Table Types * built using POGGERS. For more verbose testing please see the * benchmarks folder. * * ============================================================================ */ //#include "include/templated_quad_table.cuh" #include <poggers/metadata.cuh> #include <poggers/hash_schemes/murmurhash.cuh> #include <poggers/probing_schemes/linear_probing.cuh> #include <poggers/probing_schemes/double_hashing.cuh> #include <poggers/insert_schemes/power_of_n_shortcut.cuh> #include <poggers/insert_schemes/single_slot_insert.cuh> #include <poggers/insert_schemes/bucket_insert.cuh> #include <poggers/insert_schemes/power_of_n.cuh> #include <poggers/representations/key_val_pair.cuh> #include <poggers/representations/shortened_key_val_pair.cuh> #include <poggers/probing_schemes/xor_power_of_two.cuh> #include <poggers/representations/dynamic_container.cuh> #include <poggers/representations/key_only.cuh> #include <poggers/sizing/default_sizing.cuh> #include <poggers/sizing/variadic_sizing.cuh> #include <poggers/tables/base_table.cuh> #include <poggers/metadata.cuh> #include <poggers/hash_schemes/murmurhash.cuh> #include <poggers/probing_schemes/linear_probing.cuh> #include <poggers/probing_schemes/double_hashing.cuh> #include <poggers/probing_schemes/power_of_two.cuh> #include <poggers/insert_schemes/single_slot_insert.cuh> #include <poggers/insert_schemes/bucket_insert.cuh> #include <poggers/insert_schemes/power_of_n.cuh> #include <poggers/representations/key_val_pair.cuh> #include <poggers/representations/shortened_key_val_pair.cuh> #include <poggers/sizing/default_sizing.cuh> #include <poggers/tables/base_table.cuh> #include <poggers/insert_schemes/power_of_n_shortcut.cuh> #include <poggers/sizing/variadic_sizing.cuh> #include <poggers/representations/soa.cuh> #include <poggers/insert_schemes/power_of_n_shortcut_buckets.cuh> #include <poggers/tables/bucketed_table.cuh> #include <poggers/metadata.cuh> #include <poggers/hash_schemes/murmurhash.cuh> #include <poggers/probing_schemes/double_hashing.cuh> #include <poggers/probing_schemes/power_of_two.cuh> // new container for 2-byte key val pairs #include <poggers/representations/grouped_key_val_pair.cuh> #include <poggers/representations/key_val_pair.cuh> #include <poggers/representations/dynamic_container.cuh> #include <poggers/sizing/default_sizing.cuh> #include <poggers/insert_schemes/power_of_n_shortcut.cuh> #include <poggers/insert_schemes/power_of_n_shortcut_buckets.cuh> #include <poggers/representations/packed_bucket.cuh> #include <poggers/insert_schemes/linear_insert_buckets.cuh> #include <poggers/tables/bucketed_table.cuh> #include <poggers/representations/grouped_storage_sub_bits.cuh> #include <poggers/probing_schemes/xor_power_of_two.cuh> #include <stdio.h> #include <iostream> #include <chrono> #include <openssl/rand.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <inttypes.h> #include <time.h> #include <sys/time.h> #include <sys/types.h> #include <sys/mman.h> #include <unistd.h> #include <random> #include <assert.h> #include <chrono> #include <iostream> #include <fstream> #include <string> #include <algorithm> #include <bitset> #include <warpcore/bloom_filter.cuh> // using tiny_static_table_4 = poggers::tables::static_table<uint64_t, uint16_t, 
poggers::representations::dynamic_container<poggers::representations::key_container,uint16_t>::representation, 4, 4, poggers::insert_schemes::bucket_insert, 20, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher>; // using tcf = poggers::tables::static_table<uint64_t,uint16_t, poggers::representations::dynamic_container<poggers::representations::key_container,uint16_t>::representation, 4, 16, poggers::insert_schemes::power_of_n_insert_shortcut_scheme, 2, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher, true, tiny_static_table_4>; //Same table but with value support //swap out key-only container with key-val pair - dynamic container automatically compresses keys/vals into one uint32 // using tiny_static_table_4 = poggers::tables::static_table<uint64_t, uint16_t, poggers::representations::dynamic_container<poggers::representations::key_val_pair,uint16_t>::representation, 4, 4, poggers::insert_schemes::bucket_insert, 20, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher>; // using tcf = poggers::tables::static_table<uint64_t,uint16_t, poggers::representations::dynamic_container<poggers::representations::key_val_pair,uint16_t>::representation, 4, 16, poggers::insert_schemes::power_of_n_insert_shortcut_scheme, 2, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher, true, tiny_static_table_4>; using del_backing_table = poggers::tables::bucketed_table< uint64_t, uint16_t, poggers::representations::dynamic_bucket_container<poggers::representations::dynamic_container< poggers::representations::bit_grouped_container<16, 16>::representation, uint16_t>::representation>::representation, 4, 8, poggers::insert_schemes::linear_insert_bucket_scheme, 400, poggers::probing_schemes::linearProber, poggers::hashers::murmurHasher>; using tcf = poggers::tables::bucketed_table< uint64_t, uint16_t, poggers::representations::dynamic_bucket_container<poggers::representations::dynamic_container< poggers::representations::bit_grouped_container<16, 16>::representation, uint16_t>::representation>::representation, 4, 16, poggers::insert_schemes::power_of_n_insert_shortcut_bucket_scheme, 2, poggers::probing_schemes::XORPowerOfTwoHasher, poggers::hashers::murmurHasher, true, del_backing_table>; //using tcqf_no_back = poggers::tables::static_table<uint64_t,uint16_t, poggers::representations::dynamic_container<poggers::representations::key_container,uint16_t>::representation, 4, 16, poggers::insert_schemes::power_of_n_insert_shortcut_scheme, 2, poggers::probing_schemes::doubleHasher, poggers::hashers::murmurHasher>; using warpcore_bloom = warpcore::BloomFilter<uint64_t>; #define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } uint64_t num_slots_per_p2(uint64_t nitems){ //uint64_t nitems = .9*(1ULL << nbits); //for p=1/100, this is the correct value uint64_t nslots = 959*nitems/100; printf("using %llu slots\n", nslots); return nslots; } template <typename T> __host__ T * generate_data(uint64_t nitems){ //malloc space T * vals = (T *) malloc(nitems * sizeof(T)); // 100,000,000 uint64_t cap = 100000000ULL; for (uint64_t to_fill = 0; to_fill < nitems; to_fill+=0){ uint64_t togen = (nitems - to_fill > cap) ? 
cap : nitems - to_fill; RAND_bytes((unsigned char *) (vals + to_fill), togen * sizeof(T)); to_fill += togen; //printf("Generated %llu/%llu\n", to_fill, nitems); } return vals; } template <typename Filter, typename Key, typename Val> __global__ void find_first_fill(Filter * filter, Key * keys, Val * vals, uint64_t nitems, uint64_t * returned_nitems){ auto tile = filter->get_my_tile(); uint64_t tid = tile.meta_group_size()*blockIdx.x + tile.meta_group_rank(); if (tid != 0) return; // if (tile.thread_rank() == 0){ // for (int i = 0; i < 10; i++){ // printf("%d: %llu, %llu\n", i, keys[i], vals[i]); // } // } //printf("Starting!\n"); for (uint64_t i = 0; i < nitems; i++){ if (!filter->insert(tile, keys[i])){ if (tile.thread_rank() == 0){ printf("Inserted %llu / %llu, %f full\n", i, nitems, 1.0*i/nitems); } returned_nitems[0] = i; return; } else { Val alt_val = 0; assert(filter->query(tile, keys[i], alt_val)); assert(alt_val == vals[i]); } } if (tile.thread_rank() == 0) printf("All %llu items inserted\n", nitems); } template <typename Filter, typename Key, typename Val> __global__ void speed_insert_kernel(Filter * filter, Key * keys, Val * vals, uint64_t nvals, uint64_t * misses){ auto tile = filter->get_my_tile(); uint64_t tid = tile.meta_group_size()*blockIdx.x + tile.meta_group_rank(); if (tid >= nvals) return; if (!filter->insert(tile, keys[tid], vals[tid])){ if (tile.thread_rank() == 0) atomicAdd((unsigned long long int *) misses, 1ULL); } else{ Val test_val = 0; assert(filter->query(tile, keys[tid], test_val)); assert(test_val == vals[tid]); } //assert(filter->insert(tile, keys[tid], vals[tid])); } template <typename Filter, typename Key, typename Val> __global__ void speed_remove_kernel(Filter * filter, Key * keys, uint64_t nvals, uint64_t * misses){ auto tile = filter->get_my_tile(); uint64_t tid = tile.meta_group_size()*blockIdx.x + tile.meta_group_rank(); if (tid >= nvals) return; if (!filter->remove(tile, keys[tid]) && tile.thread_rank() == 0){ atomicAdd((unsigned long long int *) misses, 1ULL); } //else{ // Val test_val = 0; // assert(filter->query(tile, keys[tid], test_val)); // } //assert(filter->insert(tile, keys[tid], vals[tid])); } __global__ void count_bf_misses(bool * vals, uint64_t nitems, uint64_t * misses){ uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x; if (tid >= nitems) return; if (!vals[tid]){ atomicAdd((unsigned long long int *) misses, 1ULL); } } template <typename Filter, typename Key, typename Val> __global__ void speed_query_kernel(Filter * filter, Key * keys, Val * vals, uint64_t nvals, uint64_t * query_misses, uint64_t * query_failures){ auto tile = filter->get_my_tile(); uint64_t tid = tile.meta_group_size()*blockIdx.x + tile.meta_group_rank(); if (tid >= nvals) return; Val test_val = 0; if (!filter->query(tile,keys[tid], test_val)){ if(tile.thread_rank() == 0) atomicAdd((unsigned long long int *) query_misses, 1ULL); } else { if (test_val != vals[tid] && tile.thread_rank() == 0){ atomicAdd((unsigned long long int *) query_failures, 1ULL); } } //assert(filter->query(tile, keys[tid], val)); } __host__ void test_tcf_speed(const std::string& filename, int num_bits, int num_batches, bool first_file){ using Key = uint64_t; using Val = uint16_t; using Filter = tcf; std::cout << "Starting " << filename << " " << num_bits << std::endl; // poggers::sizing::size_in_num_slots<2> pre_init ((1ULL << num_bits), (1ULL << num_bits)/100); // poggers::sizing::size_in_num_slots<2> * Initializer = &pre_init; poggers::sizing::size_in_num_slots<2> pre_init ((1ULL << num_bits), 
(1ULL << num_bits)/100); poggers::sizing::size_in_num_slots<2> * Initializer = &pre_init; uint64_t nitems = Initializer->total()*.9; Key * host_keys = generate_data<Key>(nitems); Val * host_vals = generate_data<Val>(nitems); Key * fp_keys = generate_data<Key>(nitems); Key * dev_keys; Val * dev_vals; uint64_t * misses; cudaMallocManaged((void **)& misses, sizeof(uint64_t)*6); cudaDeviceSynchronize(); printf("Data generated\n"); misses[0] = 0; misses[1] = 0; misses[2] = 0; misses[3] = 0; misses[4] = 0; misses[5] = 0; //static seed for testing Filter * test_filter = Filter::generate_on_device(Initializer, 42); cudaDeviceSynchronize(); //init timing materials std::chrono::duration<double> * insert_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * query_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * fp_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * delete_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); uint64_t * batch_amount = (uint64_t *) malloc(num_batches*sizeof(uint64_t)); //print_tid_kernel<Filter, Key, Val><<<test_filter->get_num_blocks(nitems),test_filter->get_block_size(nitems)>>>(test_filter, dev_keys, dev_vals, nitems); for (uint64_t i = 0; i < num_batches; i++){ uint64_t start_of_batch = i*nitems/num_batches; uint64_t items_in_this_batch = (i+1)*nitems/num_batches; if (items_in_this_batch > nitems) items_in_this_batch = nitems; items_in_this_batch = items_in_this_batch - start_of_batch; batch_amount[i] = items_in_this_batch; cudaMalloc((void **)& dev_keys, items_in_this_batch*sizeof(Key)); cudaMalloc((void **)& dev_vals, items_in_this_batch*sizeof(Val)); cudaMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), cudaMemcpyHostToDevice); cudaMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), cudaMemcpyHostToDevice); //ensure GPU is caught up for next task cudaDeviceSynchronize(); auto insert_start = std::chrono::high_resolution_clock::now(); //add function for configure parameters - should be called by ht and return dim3 speed_insert_kernel<Filter, Key, Val><<<test_filter->get_num_blocks(items_in_this_batch),test_filter->get_block_size(items_in_this_batch)>>>(test_filter, dev_keys, dev_vals, items_in_this_batch, misses); cudaDeviceSynchronize(); auto insert_end = std::chrono::high_resolution_clock::now(); insert_diff[i] = insert_end-insert_start; cudaMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), cudaMemcpyHostToDevice); cudaMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); auto query_start = std::chrono::high_resolution_clock::now(); speed_query_kernel<Filter, Key, Val><<<test_filter->get_num_blocks(items_in_this_batch),test_filter->get_block_size(items_in_this_batch)>>>(test_filter, dev_keys, dev_vals, items_in_this_batch, &misses[1], &misses[2]); cudaDeviceSynchronize(); auto query_end = std::chrono::high_resolution_clock::now(); query_diff[i] = query_end - query_start; cudaMemcpy(dev_keys, fp_keys+start_of_batch, items_in_this_batch*sizeof(Key), cudaMemcpyHostToDevice); cudaMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); auto fp_start = 
std::chrono::high_resolution_clock::now(); speed_query_kernel<Filter, Key, Val><<<test_filter->get_num_blocks(items_in_this_batch),test_filter->get_block_size(items_in_this_batch)>>>(test_filter, dev_keys, dev_vals, items_in_this_batch, &misses[3], &misses[4]); cudaDeviceSynchronize(); auto fp_end = std::chrono::high_resolution_clock::now(); fp_diff[i] = fp_end-fp_start; cudaFree(dev_keys); cudaFree(dev_vals); } //deletes for (uint64_t i = 0; i < num_batches; i++){ uint64_t start_of_batch = i*nitems/num_batches; uint64_t items_in_this_batch = (i+1)*nitems/num_batches; if (items_in_this_batch > nitems) items_in_this_batch = nitems; items_in_this_batch = items_in_this_batch - start_of_batch; // batch_amount[i] = items_in_this_batch; cudaMalloc((void **)& dev_keys, items_in_this_batch*sizeof(Key)); //cudaMalloc((void **)& dev_vals, items_in_this_batch*sizeof(Val)); cudaMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), cudaMemcpyHostToDevice); //cudaMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); auto delete_start = std::chrono::high_resolution_clock::now(); speed_remove_kernel<Filter, Key, Val><<<test_filter->get_num_blocks(items_in_this_batch),test_filter->get_block_size(items_in_this_batch)>>>(test_filter, dev_keys, items_in_this_batch, &misses[5]); cudaDeviceSynchronize(); auto delete_end = std::chrono::high_resolution_clock::now(); delete_diff[i] = delete_end - delete_start; } cudaDeviceSynchronize(); Filter::free_on_device(test_filter); free(host_keys); free(host_vals); free(fp_keys); //free pieces //time to output printf("nbits: %d nitems: %llu, inserts: %llu, query missed: %llu, query wrong %llu, fp missed %llu, fp wrong %llu, %llu deletes missed\n", num_bits, nitems, misses[0], misses[1], misses[2], misses[3], misses[4], misses[5]); std::chrono::duration<double> summed_insert_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_insert_diff += insert_diff[i]; } std::chrono::duration<double> summed_query_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_query_diff += query_diff[i]; } std::chrono::duration<double> summed_fp_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_fp_diff += fp_diff[i]; } std::chrono::duration<double> summed_delete_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_delete_diff += delete_diff[i]; } std::string insert_file = filename + "_" + std::to_string(num_bits) + "_insert.txt"; std::string query_file = filename + "_" + std::to_string(num_bits) + "_lookup.txt"; std::string fp_file = filename + "_" + std::to_string(num_bits) + "_fp.txt"; //agg files std::string agg_insert_file = filename + "_aggregate_inserts.txt"; std::string agg_lookup_file = filename + "_aggregate_lookup.txt"; std::string agg_fp_file = filename + "_aggregate_fp.txt"; std::string del_file = filename + "_aggregate_deletes.txt"; // std::cout << insert_file << std::endl; FILE *fp_insert = fopen(insert_file.c_str(), "w"); FILE *fp_lookup = fopen(query_file.c_str(), "w"); FILE *fp_false_lookup = fopen(fp_file.c_str(), "w"); FILE *fp_agg_insert; FILE *fp_agg_lookup; FILE *fp_agg_fp; FILE *fp_agg_del; if (first_file){ fp_agg_insert = fopen(agg_insert_file.c_str(), "w"); fp_agg_lookup = fopen(agg_lookup_file.c_str(), "w"); fp_agg_fp = fopen(agg_fp_file.c_str(), "w"); fp_agg_del = fopen(del_file.c_str(), "w"); } else { fp_agg_insert = fopen(agg_insert_file.c_str(), 
"a"); fp_agg_lookup = fopen(agg_lookup_file.c_str(), "a"); fp_agg_fp = fopen(agg_fp_file.c_str(), "a"); fp_agg_del = fopen(del_file.c_str(), "a"); } if (fp_insert == NULL) { printf("Can't open the data file %s\n", insert_file); exit(1); } if (fp_lookup == NULL ) { printf("Can't open the data file %s\n", query_file); exit(1); } if (fp_false_lookup == NULL) { printf("Can't open the data file %s\n", fp_file); exit(1); } if (fp_agg_insert == NULL || fp_agg_lookup == NULL || fp_agg_fp == NULL) { printf("Can't open the aggregate files for %s-%d\n", filename, num_bits); exit(1); } if (fp_agg_del == NULL){ std::cout << "Cant open " << del_file << std::endl; } //inserts //const uint64_t scaling_factor = 1ULL; const uint64_t scaling_factor = 1000000ULL; std::cout << "Writing results to file: " << insert_file << std::endl; fprintf(fp_insert, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_insert, "%d", i*100/num_batches); fprintf(fp_insert, " %f\n", batch_amount[i]/(scaling_factor*insert_diff[i].count())); } //queries std::cout << "Writing results to file: " << query_file << std::endl; fprintf(fp_lookup, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_lookup, "%d", i*100/num_batches); fprintf(fp_lookup, " %f\n", batch_amount[i]/(scaling_factor*query_diff[i].count())); } std::cout << "Writing results to file: " << fp_file << std::endl; fprintf(fp_false_lookup, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_false_lookup, "%d", i*100/num_batches); fprintf(fp_false_lookup, " %f\n", batch_amount[i]/(scaling_factor*fp_diff[i].count())); } if (first_file){ fprintf(fp_agg_insert, "x_0 y_0\n"); fprintf(fp_agg_lookup, "x_0 y_0\n"); fprintf(fp_agg_fp, "x_0 y_0\n"); fprintf(fp_agg_del, "x_0 y_0\n"); } // fprintf(fp_agg_insert, "%d %f\n", num_bits, nitems/(1000ULL*summed_insert_diff.count())); // fprintf(fp_agg_lookup, "%d %f\n", num_bits, nitems/(1000ULL*summed_query_diff.count())); // fprintf(fp_agg_fp, "%d %f\n", num_bits, nitems/(1000ULL*summed_fp_diff.count())); fprintf(fp_agg_insert, "%d %f\n", num_bits, nitems/(scaling_factor*summed_insert_diff.count())); fprintf(fp_agg_lookup, "%d %f\n", num_bits, nitems/(scaling_factor*summed_query_diff.count())); fprintf(fp_agg_fp, "%d %f\n", num_bits, nitems/(scaling_factor*summed_fp_diff.count())); fprintf(fp_agg_del, "%d %f\n", num_bits, nitems/(scaling_factor*summed_delete_diff.count())); fclose(fp_insert); fclose(fp_lookup); fclose(fp_false_lookup); fclose(fp_agg_insert); fclose(fp_agg_lookup); fclose(fp_agg_fp); fclose(fp_agg_del); return; } template <typename Filter, typename Key, typename Val> __host__ void tcqf_find_first_fill(uint64_t num_bits){ //std::cout << "Starting " << filename << " " << num_bits << std::endl; poggers::sizing::size_in_num_slots<1> pre_init ((1ULL << num_bits)); poggers::sizing::size_in_num_slots<1> * Initializer = &pre_init; // poggers::sizing::size_in_num_slots<2> pre_init ((1ULL << num_bits), (1ULL << num_bits)/100); // poggers::sizing::size_in_num_slots<2> * Initializer = &pre_init; // poggers::sizing::size_in_num_slots<1> pre_init ((1ULL << num_bits)); // poggers::sizing::size_in_num_slots<1> * Initializer = &pre_init; uint64_t nitems = Initializer->total(); Key * host_keys = generate_data<Key>(nitems); Val * host_vals = generate_data<Val>(nitems); Key * dev_keys; Val * dev_vals; // printf("Host keys\n"); // for (int i = 0; i < 10; i++){ // printf("%d: %llu, %llu\n", i, host_keys[i], host_vals[i]); // } uint64_t * misses; cudaMallocManaged((void ** )&misses, sizeof(uint64_t)*2); 
misses[0] = 0; misses[1] = 0; uint64_t * returned_nitems; cudaMallocManaged((void **)&returned_nitems, sizeof(uint64_t)); returned_nitems[0] = 0; cudaMalloc((void **)&dev_keys, sizeof(Key)*nitems); cudaMalloc((void **)&dev_vals, sizeof(Val)*nitems); cudaMemcpy(dev_keys, host_keys, sizeof(Key)*nitems, cudaMemcpyHostToDevice); cudaMemcpy(dev_vals, host_vals, sizeof(Val)*nitems, cudaMemcpyHostToDevice); Filter * test_filter = Filter::generate_on_device(Initializer, 42); printf("Test size: %llu\n", num_bits); cudaDeviceSynchronize(); find_first_fill<Filter, Key, Val><<<1, 32>>>(test_filter, dev_keys, dev_vals, nitems, returned_nitems); cudaDeviceSynchronize(); printf("Returned %llu\n", returned_nitems[0]); cudaMemcpy(dev_keys, host_keys, sizeof(Key)*nitems, cudaMemcpyHostToDevice); cudaMemcpy(dev_vals, host_vals, sizeof(Val)*nitems, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); uint64_t new_nitems = returned_nitems[0]; speed_query_kernel<Filter, Key, Val><<<test_filter->get_num_blocks(new_nitems), test_filter->get_block_size(new_nitems)>>>(test_filter, dev_keys, dev_vals, new_nitems, &misses[0], &misses[1]); cudaDeviceSynchronize(); printf("Final misses: initial misses %llu %f wrong values %llu %f\n", misses[0], 1.0*misses[0]/new_nitems, misses[1], 1.0*misses[1]/new_nitems); cudaDeviceSynchronize(); cudaFree(misses); cudaFree(returned_nitems); Filter::free_on_device(test_filter); cudaFree(dev_keys); cudaFree(dev_vals); } __host__ void test_bloom_speed(const std::string& filename, int num_bits, int num_batches, bool first_file){ using Key = uint64_t; using Val = bool; //using Filter = tcqf; std::cout << "Starting " << filename << " " << num_bits << std::endl; uint64_t nitems = (1ULL << num_bits)*.9; printf("Starting filter with %llu items to inserts\n", nitems); Key * host_keys = generate_data<Key>(nitems); //Val * host_vals = generate_data<Val>(nitems); Key * fp_keys = generate_data<Key>(nitems); Key * dev_keys; Val * dev_vals; uint64_t * misses; cudaMallocManaged((void **)& misses, sizeof(uint64_t)*5); cudaDeviceSynchronize(); printf("Data generated\n"); misses[0] = 0; misses[1] = 0; misses[2] = 0; misses[3] = 0; misses[4] = 0; //static seed for testing warpcore_bloom bloom_filter(num_slots_per_p2(nitems), 7); cudaDeviceSynchronize(); //init timing materials std::chrono::duration<double> * insert_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * query_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); std::chrono::duration<double> * fp_diff = (std::chrono::duration<double> *) malloc(num_batches*sizeof(std::chrono::duration<double>)); uint64_t * batch_amount = (uint64_t *) malloc(num_batches*sizeof(uint64_t)); //print_tid_kernel<Filter, Key, Val><<<test_filter->get_num_blocks(nitems),test_filter->get_block_size(nitems)>>>(test_filter, dev_keys, dev_vals, nitems); for (uint64_t i = 0; i < num_batches; i++){ uint64_t start_of_batch = i*nitems/num_batches; uint64_t items_in_this_batch = (i+1)*nitems/num_batches; if (items_in_this_batch > nitems) items_in_this_batch = nitems; items_in_this_batch = items_in_this_batch - start_of_batch; batch_amount[i] = items_in_this_batch; cudaMalloc((void **)& dev_keys, items_in_this_batch*sizeof(Key)); cudaMalloc((void **)& dev_vals, items_in_this_batch*sizeof(Val)); cudaMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), cudaMemcpyHostToDevice); //cudaMemcpy(dev_vals, host_vals+start_of_batch, 
items_in_this_batch*sizeof(Val), cudaMemcpyHostToDevice); //ensure GPU is caught up for next task cudaDeviceSynchronize(); auto insert_start = std::chrono::high_resolution_clock::now(); //add function for configure parameters - should be called by ht and return dim3 bloom_filter.insert(dev_keys, items_in_this_batch); cudaDeviceSynchronize(); auto insert_end = std::chrono::high_resolution_clock::now(); insert_diff[i] = insert_end-insert_start; cudaMemcpy(dev_keys, host_keys+start_of_batch, items_in_this_batch*sizeof(Key), cudaMemcpyHostToDevice); //cudaMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); auto query_start = std::chrono::high_resolution_clock::now(); bloom_filter.retrieve(dev_keys, items_in_this_batch, dev_vals); cudaDeviceSynchronize(); auto query_end = std::chrono::high_resolution_clock::now(); //return; query_diff[i] = query_end - query_start; count_bf_misses<<<(items_in_this_batch-1)/1024 +1, 1024>>>(dev_vals, items_in_this_batch, &misses[0]); cudaMemcpy(dev_keys, fp_keys+start_of_batch, items_in_this_batch*sizeof(Key), cudaMemcpyHostToDevice); //cudaMemcpy(dev_vals, host_vals+start_of_batch, items_in_this_batch*sizeof(Val), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); auto fp_start = std::chrono::high_resolution_clock::now(); bloom_filter.retrieve(dev_keys, items_in_this_batch, dev_vals); cudaDeviceSynchronize(); auto fp_end = std::chrono::high_resolution_clock::now(); fp_diff[i] = fp_end-fp_start; count_bf_misses<<<(items_in_this_batch-1)/1024 +1, 1024>>>(dev_vals, items_in_this_batch, &misses[1]); cudaDeviceSynchronize(); cudaFree(dev_keys); cudaFree(dev_vals); } cudaDeviceSynchronize(); //Filter::free_on_device(test_filter); free(host_keys); //free(host_vals); free(fp_keys); //free pieces //time to output printf("%llu %llu %f %llu %f %f \n", nitems, misses[0], 1.0*(misses[0])/nitems, misses[1], 1.0*misses[1]/nitems, 1.0 - 1.0*misses[1]/nitems); std::chrono::duration<double> summed_insert_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_insert_diff += insert_diff[i]; } std::chrono::duration<double> summed_query_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_query_diff += query_diff[i]; } std::chrono::duration<double> summed_fp_diff = std::chrono::nanoseconds::zero(); for (int i =0; i < num_batches;i++){ summed_fp_diff += fp_diff[i]; } std::string insert_file = filename + "_" + std::to_string(num_bits) + "_insert.txt"; std::string query_file = filename + "_" + std::to_string(num_bits) + "_lookup.txt"; std::string fp_file = filename + "_" + std::to_string(num_bits) + "_fp.txt"; //agg files std::string agg_insert_file = filename + "_aggregate_inserts.txt"; std::string agg_lookup_file = filename + "_aggregate_lookup.txt"; std::string agg_fp_file = filename + "_aggregate_fp.txt"; // std::cout << insert_file << std::endl; FILE *fp_insert = fopen(insert_file.c_str(), "w"); FILE *fp_lookup = fopen(query_file.c_str(), "w"); FILE *fp_false_lookup = fopen(fp_file.c_str(), "w"); FILE *fp_agg_insert; FILE *fp_agg_lookup; FILE *fp_agg_fp; if (first_file){ fp_agg_insert = fopen(agg_insert_file.c_str(), "w"); fp_agg_lookup = fopen(agg_lookup_file.c_str(), "w"); fp_agg_fp = fopen(agg_fp_file.c_str(), "w"); } else { fp_agg_insert = fopen(agg_insert_file.c_str(), "a"); fp_agg_lookup = fopen(agg_lookup_file.c_str(), "a"); fp_agg_fp = fopen(agg_fp_file.c_str(), "a"); } if (fp_insert == NULL) { printf("Can't open the data file %s\n", 
insert_file); exit(1); } if (fp_lookup == NULL ) { printf("Can't open the data file %s\n", query_file); exit(1); } if (fp_false_lookup == NULL) { printf("Can't open the data file %s\n", fp_file); exit(1); } if (fp_agg_insert == NULL || fp_agg_lookup == NULL || fp_agg_fp == NULL) { printf("Can't open the aggregate files for %s-%d\n", filename, num_bits); exit(1); } //inserts //const uint64_t scaling_factor = 1ULL; const uint64_t scaling_factor = 1000000ULL; std::cout << "Writing results to file: " << insert_file << std::endl; fprintf(fp_insert, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_insert, "%d", i*100/num_batches); fprintf(fp_insert, " %f\n", batch_amount[i]/(scaling_factor*insert_diff[i].count())); } //queries std::cout << "Writing results to file: " << query_file << std::endl; fprintf(fp_lookup, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_lookup, "%d", i*100/num_batches); fprintf(fp_lookup, " %f\n", batch_amount[i]/(scaling_factor*query_diff[i].count())); } std::cout << "Writing results to file: " << fp_file << std::endl; fprintf(fp_false_lookup, "x_0 y_0\n"); for (int i = 0; i < num_batches; i++){ fprintf(fp_false_lookup, "%d", i*100/num_batches); fprintf(fp_false_lookup, " %f\n", batch_amount[i]/(scaling_factor*fp_diff[i].count())); } if (first_file){ fprintf(fp_agg_insert, "x_0 y_0\n"); fprintf(fp_agg_lookup, "x_0 y_0\n"); fprintf(fp_agg_fp, "x_0 y_0\n"); } // fprintf(fp_agg_insert, "%d %f\n", num_bits, nitems/(1000ULL*summed_insert_diff.count())); // fprintf(fp_agg_lookup, "%d %f\n", num_bits, nitems/(1000ULL*summed_query_diff.count())); // fprintf(fp_agg_fp, "%d %f\n", num_bits, nitems/(1000ULL*summed_fp_diff.count())); fprintf(fp_agg_insert, "%d %f\n", num_bits, nitems/(scaling_factor*summed_insert_diff.count())); fprintf(fp_agg_lookup, "%d %f\n", num_bits, nitems/(scaling_factor*summed_query_diff.count())); fprintf(fp_agg_fp, "%d %f\n", num_bits, nitems/(scaling_factor*summed_fp_diff.count())); fclose(fp_insert); fclose(fp_lookup); fclose(fp_false_lookup); fclose(fp_agg_insert); fclose(fp_agg_lookup); fclose(fp_agg_fp); return; } __host__ void test_first_fail(uint64_t num_bits){ //tcqf_find_first_fill<tcqf, uint16_t, uint16_t>(num_bits); //tcqf_find_first_fill<tcqf_no_back, uint64_t, uint16_t>(num_bits); } int main(int argc, char** argv) { // poggers::sizing::size_in_num_slots<1> first_size_20(1ULL << 20); // printf("2^20\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_20); // poggers::sizing::size_in_num_slots<1> first_size_22(1ULL << 22); // printf("2^22\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_22); // poggers::sizing::size_in_num_slots<1> first_size_24(1ULL << 24); // printf("2^24\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_24); // poggers::sizing::size_in_num_slots<1> first_size_26(1ULL << 26); // printf("2^26\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_26); // poggers::sizing::size_in_num_slots<1> first_size_28(1ULL << 28); // printf("2^28\n"); // test_speed<table_type, uint64_t, uint64_t>(&first_size_28); // printf("alt table\n"); // poggers::sizing::size_in_num_slots<1>half_split_20(6000); // test_speed<p2_table, key_type, val_type>(&half_split_20); // test_speed<small_double_type, uint64_t, uint64_t>(&half_split_22); // poggers::sizing::size_in_num_slots<2>half_split_24(1ULL << 23, 1ULL << 23); // test_speed<small_double_type, uint64_t, uint64_t>(&half_split_24); // poggers::sizing::size_in_num_slots<2>half_split_26(1ULL << 25, 1ULL << 25); // 
test_speed<small_double_type, uint64_t, uint64_t>(&half_split_26); // printf("P2 tiny table\n"); // poggers::sizing::size_in_num_slots<1>half_split_28(1ULL << 28); // test_speed<p2_table, key_type, val_type>(&half_split_28); // poggers::sizing::variadic_size size(100000,100); // tcqf * test_tcqf = tcqf::generate_on_device(&size, 42); // cudaDeviceSynchronize(); // tcqf::free_on_device(test_tcqf); // warpcore_bloom my_filter((1ULL << 20), 7); // test_bloom_speed("bloom_results/test", 20, 20, true); test_bloom_speed("bloom_results/test", 22, 20, true); test_bloom_speed("bloom_results/test", 24, 20, false); test_bloom_speed("bloom_results/test", 26, 20, false); test_bloom_speed("bloom_results/test", 28, 20, false); test_bloom_speed("bloom_results/test", 30, 20, false); // test_tcf_speed("results/test", 20, 20, true); test_tcf_speed("results/test", 22, 20, true); test_tcf_speed("results/test", 24, 20, false); test_tcf_speed("results/test", 26, 20, false); test_tcf_speed("results/test", 28, 20, false); test_tcf_speed("results/test", 30, 20, false); // test_first_fail(22); // test_first_fail(24); // test_first_fail(26); // test_first_fail(28); // test_first_fail(30); return 0; }
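A short aside on the error-handling convention in the benchmark above: the file defines a gpuErrorCheck/gpuAssert pair, but the timing loops call cudaMalloc, cudaMemcpy and the kernels bare. The sketch below shows the usual way such a wrapper is applied; it is illustrative only, and the buffer name and size are placeholders rather than values taken from the benchmark.

// Illustrative sketch (not part of the benchmark): routing raw runtime calls
// through the gpuErrorCheck/gpuAssert pair defined in the file above, so a failed
// allocation or copy aborts with the offending file and line instead of failing silently.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <cstdint>

#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}

int main()
{
   const uint64_t n = 1024;          // placeholder size, not a benchmark parameter
   uint64_t *dev_keys = nullptr;

   // Runtime API calls return cudaError_t and can be wrapped directly.
   gpuErrorCheck(cudaMalloc((void **)&dev_keys, n * sizeof(uint64_t)));
   gpuErrorCheck(cudaMemset(dev_keys, 0, n * sizeof(uint64_t)));

   // Kernel launches return void; check the sticky error state after the launch instead.
   gpuErrorCheck(cudaGetLastError());
   gpuErrorCheck(cudaDeviceSynchronize());

   gpuErrorCheck(cudaFree(dev_keys));
   return 0;
}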
5643574c613dc7fd867ba1be13c9d181e1de16f4.hip
// !!! This is a file automatically generated by hipify!!!
#include "custom_cuda.h"

#define DEBUG 1

bool ERROR_CHECK(hipError_t Status, string file, int line) {
    if(Status != hipSuccess) {
        printf("(EE) \n");
        printf("(EE) Error detected in the LDPC decoder (%s : %d)\n", file.c_str(), line);
        printf("(EE) MSG: %s\n", hipGetErrorString(Status));
        printf("(EE) \n");
        exit(0);
        return false;
    }
    return true;
}

char* FilenamePtr(const char* filename){
    char* fname = (char*)filename;
    char* ptr = fname;
    while( *fname != 0 ){
        if( *fname == '\\' ) ptr = fname + 1;
        if( *fname == '/' ) ptr = fname + 1;
        fname += 1;
    }
    return ptr;
}

void CUDA_MALLOC_HOST(float** ptr, size_t nbElements, const char * file, int line){
    hipError_t Status;
    size_t nbytes = nbElements * sizeof(float);
    Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
    printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
    ERROR_CHECK(Status, __FILE__, __LINE__);
}

void CUDA_MALLOC_HOST(int** ptr, size_t nbElements, const char * file, int line){
    hipError_t Status;
    size_t nbytes = nbElements * sizeof(int);
    Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
    printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
    ERROR_CHECK(Status, __FILE__, __LINE__);
}

void CUDA_MALLOC_HOST(unsigned int** ptr, size_t nbElements, const char * file, int line){
    hipError_t Status;
    size_t nbytes = nbElements * sizeof(unsigned int);
    Status = hipHostMalloc(ptr, nbytes);
#if DEBUG == 1
    printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
    ERROR_CHECK(Status, __FILE__, __LINE__);
}

static size_t aDevice = 0;

void CUDA_MALLOC_HOST(char** ptr, size_t nbElements, const char * file, int line){
    hipError_t Status;
    size_t nbytes = nbElements * sizeof(char);
    Status = hipHostMalloc(ptr, nbytes);
    aDevice += nbytes;
#if DEBUG == 1
    printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
#endif
    ERROR_CHECK(Status, __FILE__, __LINE__);
}

void CUDA_MALLOC_DEVICE(float** ptr, size_t nbElements, const char * file, int line){
    hipError_t Status;
    size_t nbytes = nbElements * sizeof(float);
    Status = hipMalloc(ptr, nbytes);
    aDevice += nbytes;
#if DEBUG == 1
    printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
    // printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
    ERROR_CHECK(Status, __FILE__, __LINE__);
}

void CUDA_MALLOC_DEVICE(int** ptr, size_t nbElements, const char * file, int line){
    hipError_t Status;
    size_t nbytes = nbElements * sizeof(int);
    Status = hipMalloc(ptr, nbytes);
    aDevice += nbytes;
#if DEBUG == 1
    printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
    // printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
    ERROR_CHECK(Status, __FILE__, __LINE__);
}

void CUDA_MALLOC_DEVICE(unsigned int** ptr, size_t nbElements, const char * file, int line){
    hipError_t Status;
    size_t nbytes = nbElements * sizeof(unsigned int);
    Status = hipMalloc(ptr, nbytes);
    aDevice += nbytes;
#if DEBUG == 1
    printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
    // printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
    ERROR_CHECK(Status, __FILE__, __LINE__);
}

void CUDA_MALLOC_DEVICE(char** ptr, size_t nbElements, const char * file, int line){
    hipError_t Status;
    size_t nbytes = nbElements * sizeof(char);
    Status = hipMalloc(ptr, nbytes);
    aDevice += nbytes;
#if DEBUG == 1
    printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1);
    // printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024);
#endif
    ERROR_CHECK(Status, __FILE__, __LINE__);
}
5643574c613dc7fd867ba1be13c9d181e1de16f4.cu
#include "custom_cuda.h" #define DEBUG 1 bool ERROR_CHECK(cudaError_t Status, string file, int line) { if(Status != cudaSuccess) { printf("(EE) \n"); printf("(EE) Error detected in the LDPC decoder (%s : %d)\n", file.c_str(), line); printf("(EE) MSG: %s\n", cudaGetErrorString(Status)); printf("(EE) \n"); exit(0); return false; } return true; } char* FilenamePtr(const char* filename){ char* fname = (char*)filename; char* ptr = fname; while( *fname != 0 ){ if( *fname == '\\' ) ptr = fname + 1; if( *fname == '/' ) ptr = fname + 1; fname += 1; } return ptr; } void CUDA_MALLOC_HOST(float** ptr, size_t nbElements, const char * file, int line){ cudaError_t Status; size_t nbytes = nbElements * sizeof(float); Status = cudaMallocHost(ptr, nbytes); #if DEBUG == 1 printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1); #endif ERROR_CHECK(Status, __FILE__, __LINE__); } void CUDA_MALLOC_HOST(int** ptr, size_t nbElements, const char * file, int line){ cudaError_t Status; size_t nbytes = nbElements * sizeof(int); Status = cudaMallocHost(ptr, nbytes); #if DEBUG == 1 printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1); #endif ERROR_CHECK(Status, __FILE__, __LINE__); } void CUDA_MALLOC_HOST(unsigned int** ptr, size_t nbElements, const char * file, int line){ cudaError_t Status; size_t nbytes = nbElements * sizeof(unsigned int); Status = cudaMallocHost(ptr, nbytes); #if DEBUG == 1 printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1); #endif ERROR_CHECK(Status, __FILE__, __LINE__); } static size_t aDevice = 0; void CUDA_MALLOC_HOST(char** ptr, size_t nbElements, const char * file, int line){ cudaError_t Status; size_t nbytes = nbElements * sizeof(char); Status = cudaMallocHost(ptr, nbytes); aDevice += nbytes; #if DEBUG == 1 printf("(II) + Allocating (%s:%d) Host Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1); #endif ERROR_CHECK(Status, __FILE__, __LINE__); } void CUDA_MALLOC_DEVICE(float** ptr, size_t nbElements, const char * file, int line){ cudaError_t Status; size_t nbytes = nbElements * sizeof(float); Status = cudaMalloc(ptr, nbytes); aDevice += nbytes; #if DEBUG == 1 printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1); // printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024); #endif ERROR_CHECK(Status, __FILE__, __LINE__); } void CUDA_MALLOC_DEVICE(int** ptr, size_t nbElements, const char * file, int line){ cudaError_t Status; size_t nbytes = nbElements * sizeof(int); Status = cudaMalloc(ptr, nbytes); aDevice += nbytes; #if DEBUG == 1 printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1); // printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024); #endif ERROR_CHECK(Status, __FILE__, __LINE__); } void CUDA_MALLOC_DEVICE(unsigned int** ptr, size_t nbElements, const char * file, int line){ cudaError_t Status; size_t nbytes = nbElements * sizeof(unsigned int); Status = cudaMalloc(ptr, nbytes); aDevice += nbytes; #if DEBUG == 1 printf("(II) + Allocating 
(%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1); // printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024); #endif ERROR_CHECK(Status, __FILE__, __LINE__); } void CUDA_MALLOC_DEVICE(char** ptr, size_t nbElements, const char * file, int line){ cudaError_t Status; size_t nbytes = nbElements * sizeof(char); Status = cudaMalloc(ptr, nbytes); aDevice += nbytes; #if DEBUG == 1 printf("(II) + Allocating (%s:%d) Device Memory, %ld elements (%ld ko) adr [%p, %p]\n", FilenamePtr(file), line, nbElements, nbytes/1024, *ptr, *ptr+nbElements-1); // printf("(II) + Memory allocated on GPU device = %d Mo\n", aDevice/1024/1024); #endif ERROR_CHECK(Status, __FILE__, __LINE__); }
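The CUDA_MALLOC_HOST / CUDA_MALLOC_DEVICE helpers above take an explicit file/line pair so that the DEBUG print can attribute every allocation to its call site, and they abort inside ERROR_CHECK on failure rather than returning a status. A minimal usage sketch follows, assuming custom_cuda.h declares the overloads shown above; the forwarding macros and the function/buffer names are hypothetical illustrations, not part of custom_cuda.h/.cu.

// Illustrative sketch only: hypothetical convenience macros that forward the call
// site into the helpers defined above; custom_cuda.cu itself does not define them.
#include "custom_cuda.h"

#define MALLOC_DEVICE_AT(ptr, n) CUDA_MALLOC_DEVICE((ptr), (n), __FILE__, __LINE__)
#define MALLOC_HOST_AT(ptr, n)   CUDA_MALLOC_HOST((ptr), (n), __FILE__, __LINE__)

// Hypothetical example: allocate matching pinned-host and device buffers for a
// decoder working set. There is no return value to check -- a failed
// cudaMallocHost/cudaMalloc is reported and the process exits inside ERROR_CHECK.
void allocate_llr_buffers(float **h_llr, float **d_llr, size_t nbElements)
{
    MALLOC_HOST_AT(h_llr, nbElements);    // pinned host memory (cudaMallocHost)
    MALLOC_DEVICE_AT(d_llr, nbElements);  // device memory (cudaMalloc), counted in aDevice
}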
10d9747b0c97995286e913549955c1b6ac5c2fa8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ {% set wdesc = "weighted" if weighted else "unweighted" %} #include "codegen/embedding_backward_template_helpers.cuh" {% if not dense %} constexpr int32_t kCacheLocationMissing = -1; {% endif %} enum { DEVICE = 0, MANAGED = 1, MANAGED_CACHING = 2, }; constexpr size_t kBackwardMaxThreads = 512; using namespace at; using namespace fbgemm_gpu; __global__ void split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_find_long_segments( const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_num_runs, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_run_lengths, PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> long_run_ids, PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> num_long_run_ids, int32_t max_segment_length_per_warp) { const int32_t num_runs = sorted_linear_indices_num_runs[0]; for (auto run_id = blockIdx.x * blockDim.x + threadIdx.x; run_id < num_runs; run_id += blockDim.x * gridDim.x) { if (sorted_linear_indices_run_lengths[run_id] >= max_segment_length_per_warp) { auto long_run_idx = gpuAtomicIncrement(&num_long_run_ids[0]); long_run_ids[long_run_idx] = run_id; } } } template < typename emb_t, typename cache_t, size_t kMaxVecsPerThread> __global__ void __launch_bounds__(kMaxThreads) split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1( const PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits> grad_output, PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights, {% if not dense %} PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> weights_placements, {% endif %} const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> hash_size_cumsum, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> sorted_linear_indices_run, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_cumulative_run_lengths, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_run_lengths, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> long_run_ids, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> num_long_run_ids, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_infos, {% if not dense %} const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_lxu_cache_locations, {% endif %} {% if weighted %} const PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits> sorted_indice_weights, {% endif %} {% if not dense %} bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args, {% else %} PackedTensorAccessor64<cache_t, 1, RestrictPtrTraits> grad_dev_weights, {% endif %} FixedDivisor fd, {{ args.split_kernel_args | join(", ") }}) { int32_t T = D_offsets.size(0) - 1; const int32_t B = grad_output.size(0); const int32_t num_long_runs = num_long_run_ids[0]; for (int32_t long_run_id = blockIdx.x; long_run_id < num_long_runs; long_run_id += gridDim.x) { int32_t current_run_id = 
long_run_ids[long_run_id]; const int64_t linear_index = sorted_linear_indices_run[current_run_id]; const int32_t segment_start = sorted_linear_indices_cumulative_run_lengths[current_run_id]; const int32_t segment_end = sorted_linear_indices_cumulative_run_lengths[current_run_id + 1]; const int32_t SL = segment_end - segment_start; const int32_t warp_id = threadIdx.y; const int32_t lane_id = threadIdx.x; // Note that with shared embedding tables we can have multiple tables // (i.e. different values of `t` sharing the same segment). // const auto info_0 = sorted_infos[segment_start]; int32_t t_0 = fd.Div(info_0); //info_0 / B; int64_t hash_size = hash_size_cumsum[t_0]; int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0]; int64_t idx = linear_index - hash_size; const int32_t SL_per_warp = div_round_up(SL, blockDim.y); const int32_t sl_start = SL_per_warp * warp_id; const int32_t sl_end = min(SL_per_warp * (warp_id + 1), SL); Vec4T<acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread]; for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) { int32_t sl_j = sl + threadIdx.x; int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t b; //= b_t % B; int32_t t; //= b_t / B; fd.DivMod(b_t, &t, &b); int32_t D_start = sl_j < sl_end ? D_offsets[t] : 0; {% if weighted %} acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0; {% endif %} for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) { int32_t b_j = __shfl_sync(0xFFFFFFFF, b, j); int32_t D_start_j = __shfl_sync(0xFFFFFFFF, D_start, j); {% if weighted %} acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j); {% endif %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<acc_type<cache_t, true>> grad_out_vec( &grad_output[b_j][0] + D_start_j + d); {% if weighted %} grad_sum[i].fma_(grad_out_vec, idx_weight_j); {% else %} grad_sum[i].acc.x += grad_out_vec.acc.x; grad_sum[i].acc.y += grad_out_vec.acc.y; grad_sum[i].acc.z += grad_out_vec.acc.z; grad_sum[i].acc.w += grad_out_vec.acc.w; {% endif %} } } } // do shared memory reduction only if we used multiple blocks. 
if (SL > SL_per_warp) { struct SharedMemory<Vec4T<acc_type<cache_t, true>>> smem; Vec4T<acc_type<cache_t, true>>* shared_grad_sums = smem.getPointer(); #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = grad_sum[i]; } __syncthreads(); if (blockDim.y >= 32) { if (warp_id < 16) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 16) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 16) { if (warp_id < 8) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 8) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 8) { if (warp_id < 4) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 4) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 4) { if (warp_id < 2) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 2) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (warp_id == 0) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { grad_sum[i] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 1) * kMaxVecsPerThread * kWarpSize]); } } } if (warp_id == 0) { int64_t weights_offset = weights_offsets[t_0]; {% if not dense %} emb_t* __restrict__ weights{nullptr}; cache_t* __restrict__ cache_weights{nullptr}; int32_t D_emb = D; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } const auto weights_placement = weights_placements[t_0]; if (weights_placement == DEVICE) { weights = &dev_weights[weights_offset + idx * D_emb]; } else { weights = &uvm_weights[weights_offset + idx * D_emb]; } if (weights_placement == MANAGED_CACHING) { int32_t cache_idx = sorted_lxu_cache_locations[segment_start]; if (cache_idx != kCacheLocationMissing) { cache_weights = &lxu_cache_weights[cache_idx][0]; } } {% for tensor in args.split_tensors %} acc_type<cache_t, true>* __restrict__ {{ tensor }}; const auto {{ tensor }}_placement = {{ tensor }}_placements[t_0]; int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0]; if ({{ tensor }}_placement == DEVICE) { 
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset]; } else { {{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset]; } {% endfor %} struct SharedMemory<Vec4T<acc_type<cache_t, true>>> weight_update_buffer; Vec4T<acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer(); auto weight_row_template = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>(weights, cache_weights, D, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), threadIdx.x + current_run_id * blockDim.x, &state); weight_row_template.set_stoc_state(&state); } float2 qparams_template; if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { qparams_template = weight_row_template.load_qparams(); } {{ split_precomputation }} float2 qparams_new; #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template); auto& grad = grad_sum[i]; {{ split_weight_update }} if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { shared_weight_update_row[lane_id + i * kWarpSize] = weight_new; } else { weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8 } } if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { // calculate qparams from updated weight row qparams_new = thrust_find_qparams<acc_type<cache_t, true>>(shared_weight_update_row, D); weight_row_template.store_qparams(qparams_new); #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; weight_row_template.store(shared_weight_update_row[lane_id + i * kWarpSize], d, qparams_new); } } {% else %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; auto& grad = grad_sum[i]; grad.store(&grad_dev_weights[weights_offset + idx * D + d]); } {% endif %} } } } template < typename emb_t, typename cache_t, size_t kMaxVecsPerThread> __global__ __launch_bounds__(kBackwardMaxThreads) void split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1( const PackedTensorAccessor32<acc_type<cache_t,true>, 2, RestrictPtrTraits> grad_output, PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights, {% if not dense %} PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> weights_placements, {% endif %} const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> hash_size_cumsum, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> sorted_linear_indices_run, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_cumulative_run_lengths, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_run_lengths, const 
PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_infos, {% if not dense %} const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_lxu_cache_locations, {% endif %} {% if weighted %} const PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits> sorted_indice_weights, {% endif %} const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_num_runs, int32_t max_segment_length_per_warp, {% if not dense %} bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args, {% else %} PackedTensorAccessor64<cache_t, 1, RestrictPtrTraits> grad_dev_weights, {% endif %} FixedDivisor fd, {{ args.split_kernel_args | join(", ") }}) { const int32_t T = D_offsets.size(0) - 1; const int32_t B = grad_output.size(0); const int32_t run_id = blockIdx.x * blockDim.y + threadIdx.y; if (run_id >= sorted_linear_indices_run.size(0)) { return; } if (run_id >= sorted_linear_indices_num_runs[0]) { return; } const int64_t linear_index = sorted_linear_indices_run[run_id]; const int32_t segment_start = sorted_linear_indices_cumulative_run_lengths[run_id]; const int32_t segment_end = sorted_linear_indices_cumulative_run_lengths[run_id + 1]; const int32_t SL = segment_end - segment_start; if (SL >= max_segment_length_per_warp) { return; } // now, each segment corresponds to exactly one table `t` and row in // that table (`idx`). Thus, we can hoist out some of the book-keeping. const auto info_0 = sorted_infos[segment_start]; int32_t t_0 = fd.Div(info_0); // info_0 / B; int64_t hash_size = hash_size_cumsum[t_0]; int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0]; int64_t idx = linear_index - hash_size; const int32_t SL_per_warp = div_round_up(SL, blockDim.y); const int32_t sl_start = 0; const int32_t sl_end = SL; Vec4T<acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread]; for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) { int32_t sl_j = sl + threadIdx.x; int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t b; //= b_t % B; int32_t t; //= b_t / B; fd.DivMod(b_t, &t, &b); int32_t D_start = D_offsets[t]; {% if weighted %} acc_type<cache_t, true> idx_weight = sl_j < sl_end ? 
sorted_indice_weights[segment_start + sl_j] : 0.0; {% endif %} for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) { int32_t b_j = __shfl_sync(0xFFFFFFFF, b, j); int32_t D_start_j = __shfl_sync(0xFFFFFFFF, D_start, j); {% if weighted %} acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j); {% endif %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<acc_type<cache_t, true>> grad_out_vec( &grad_output[b_j][0] + D_start_j + d); {% if weighted %} grad_sum[i].fma_(grad_out_vec, idx_weight_j); {% else %} grad_sum[i].acc.x += grad_out_vec.acc.x; grad_sum[i].acc.y += grad_out_vec.acc.y; grad_sum[i].acc.z += grad_out_vec.acc.z; grad_sum[i].acc.w += grad_out_vec.acc.w; {% endif %} } } } int64_t weights_offset = weights_offsets[t_0]; {% if not dense %} emb_t* __restrict__ weights{nullptr}; cache_t* __restrict__ cache_weights{nullptr}; int32_t D_emb = D; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } const auto weights_placement = weights_placements[t_0]; if (weights_placement == DEVICE) { weights = &dev_weights[weights_offset + idx * D_emb]; } else { weights = &uvm_weights[weights_offset + idx * D_emb]; } if (weights_placement == MANAGED_CACHING) { int32_t cache_idx = sorted_lxu_cache_locations[segment_start]; if (cache_idx != kCacheLocationMissing) { cache_weights = &lxu_cache_weights[cache_idx][0]; } } {% for tensor in args.split_tensors %} acc_type<cache_t, true>* __restrict__ {{ tensor }}; const auto {{ tensor }}_placement = {{ tensor }}_placements[t_0]; int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0]; if ({{ tensor }}_placement == DEVICE) { {{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset]; } else { {{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset]; } {% endfor %} struct SharedMemory<Vec4T<acc_type<cache_t, true>>> weight_update_buffer; Vec4T<acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer(); auto weight_row_template = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>(weights, cache_weights, D, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. 
auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), threadIdx.x + run_id * blockDim.x, &state); weight_row_template.set_stoc_state(&state); } float2 qparams_template; if (std::is_same<emb_t, uint8_t>::value && !cache_weights){ qparams_template = weight_row_template.load_qparams(); } {{ split_precomputation }} float2 qparams_new; #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template); auto& grad = grad_sum[i]; {{ split_weight_update }} if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize] = weight_new; } else { weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if type is not int8 } } if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { // calculate new qparams after row update qparams_new = thrust_find_qparams<acc_type<cache_t, true>>(&shared_weight_update_row[threadIdx.y * kMaxVecsPerThread * kWarpSize], D); weight_row_template.store_qparams(qparams_new); // fetch cached updated row from shared mem and quantize on-the-fly when saving to lowp embedding #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; weight_row_template.store(shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize], d, qparams_new); } } {% else %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; auto& grad = grad_sum[i]; grad.store(&grad_dev_weights[weights_offset + idx * D + d]); } {% endif %} } template <typename cache_t, typename emb_t> __global__ void __launch_bounds__(kMaxThreads) grad_mean_kernel( const PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits> grad_output, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> offsets, PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits> grad_output_mean) { int32_t B = grad_output.size(0); int32_t T = D_offsets.size(0) - 1; int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y; int32_t b = b_t % B; int32_t t = b_t / B; if (b_t >= B * T) { return; } int32_t D_start = D_offsets[t]; int32_t D_end = D_offsets[t + 1]; int32_t D = D_end - D_start; int64_t indices_start = offsets[t * B + b]; int64_t indices_end = offsets[t * B + b + 1]; int32_t L = indices_end - indices_start; if (L != 0) { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<acc_type<cache_t, true>> grad_out_vec(&grad_output[b][D_start + d * 4]); grad_out_vec.acc.x /= L; grad_out_vec.acc.y /= L; grad_out_vec.acc.z /= L; grad_out_vec.acc.w /= L; grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]); } } else { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<acc_type<cache_t, true>> grad_out_vec(&grad_output[b][D_start + d * 4]); grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]); } } } {{ "void" if not dense else "Tensor" }} split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc 
}}_exact_cuda( Tensor grad_output, Tensor dev_weights, {% if not dense %} Tensor uvm_weights, Tensor lxu_cache_weights, Tensor weights_placements, {% endif %} Tensor weights_offsets, Tensor D_offsets, int64_t max_D, Tensor hash_size_cumsum, int64_t total_hash_size_bits, Tensor indices, Tensor offsets, int64_t pooling_mode, {% if weighted %} Tensor indice_weights, {% endif %} {% if not dense %} Tensor lxu_cache_locations, {% endif %} int64_t unused_, int64_t max_segment_length_per_warp, {% if not dense %} bool stochastic_rounding, {% endif %} {{ args.split_function_args | join(", ") }}) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(dev_weights.get_device()); {% if dense %} auto grad_dev_weights = zeros_like(dev_weights); {% endif %} // short-circuit if there are zero indices. if (indices.numel() == 0) { return {{ "grad_dev_weights" if dense else "" }}; } int32_t T = D_offsets.numel() - 1; TORCH_CHECK(T > 0); // offsets = [B x T + 1] const auto B = (offsets.size(0) - 1) / T; TORCH_CHECK(B > 0); auto BT_block_size = kMaxThreads / kWarpSize; TORCH_CHECK(BT_block_size * kWarpSize <= kMaxThreads); TORCH_CHECK(max_D <= {{ max_embedding_dim }}); // V100: 96 KB; A100: 160 KB. int max_shared_bytes = 0; hipDeviceGetAttribute(&max_shared_bytes, hipDeviceAttributeSharedMemPerBlockOptin, dev_weights.get_device()); C10_HIP_KERNEL_LAUNCH_CHECK(); int shared_kb = max_shared_bytes >> 10; // V100: 64 KB; A100: 96 KB. // Use 2/3 of the available GPU shared mem; leave rooms for L1$. int used_shared_kb = round_down(shared_kb * 2 / 3, 16); TORCH_CHECK(used_shared_kb > 0); int used_shared_bytes = used_shared_kb << 10; auto infos = at::empty_like(indices, indices.options().dtype(kInt)); auto infos_sorted = at::empty_like(infos); auto linear_indices = at::empty_like(indices); auto linear_indices_sorted = at::empty_like(indices); hipLaunchKernelGGL(( linearize_index_kernel), dim3(div_round_up(B * T, kMaxThreads)), dim3(kMaxThreads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), infos.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), linear_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), infos.data_ptr<int32_t>(), infos_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(kByte)); AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), infos.data_ptr<int32_t>(), infos_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); } {% if not dense %} auto lxu_cache_locations_sorted = at::empty_like(lxu_cache_locations); if (lxu_cache_locations.size(0) > 0) { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), lxu_cache_locations.data_ptr<int32_t>(), 
lxu_cache_locations_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(kByte)); AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), lxu_cache_locations.data_ptr<int32_t>(), lxu_cache_locations_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); } {% endif %} auto sorted_linear_indices_run = at::empty_like(indices); auto sorted_linear_indices_run_lengths = at::zeros_like(indices, indices.options().dtype(kInt)); auto sorted_linear_indices_num_runs = at::zeros({1}, indices.options().dtype(kInt)); { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode( nullptr, temp_storage_bytes, linear_indices_sorted.data_ptr<int64_t>(), sorted_linear_indices_run.data_ptr<int64_t>(), sorted_linear_indices_run_lengths.data_ptr<int32_t>(), sorted_linear_indices_num_runs.data_ptr<int32_t>(), linear_indices_sorted.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); // Allocate temporary storage auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(kByte)); // Run encoding AT_CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode( temp_storage.data_ptr(), temp_storage_bytes, linear_indices_sorted.data_ptr<int64_t>(), sorted_linear_indices_run.data_ptr<int64_t>(), sorted_linear_indices_run_lengths.data_ptr<int32_t>(), sorted_linear_indices_num_runs.data_ptr<int32_t>(), linear_indices_sorted.numel(), at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); } auto sorted_linear_indices_cumulative_run_lengths = asynchronous_complete_cumsum(sorted_linear_indices_run_lengths); {% if not dense %} DISPATCH_EMB_CACHE_TYPES( {% else %} AT_DISPATCH_FLOATING_TYPES_AND_HALF( {% endif %} dev_weights.type(), {% if not dense %} lxu_cache_weights.type(), {% endif %} "split_embedding_backward_{{ optimizer }}_exact_kernel", ([&] { {% if weighted %} auto indice_weights_sorted = at::empty_like(indice_weights); { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), {% if not dense %} indice_weights.data_ptr<acc_type<cache_t, true>>(), indice_weights_sorted.data_ptr<acc_type<cache_t, true>>(), {% else %} indice_weights.data_ptr<acc_type<scalar_t, true>>(), indice_weights_sorted.data_ptr<acc_type<scalar_t, true>>(), {% endif %} linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(kByte)); AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), {% if not dense %} indice_weights.data_ptr<acc_type<cache_t, true>>(), indice_weights_sorted.data_ptr<acc_type<cache_t, true>>(), {% else %} indice_weights.data_ptr<acc_type<scalar_t, true>>(), indice_weights_sorted.data_ptr<acc_type<scalar_t, true>>(), {% endif %} linear_indices.numel(), 0, total_hash_size_bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), false)); } {% endif %} auto grad_output_accessor = grad_output.packed_accessor32< 
acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 2, RestrictPtrTraits>(); Tensor grad_output_mean; if (pooling_mode == MEAN) { grad_output_mean = at::empty_like(grad_output); hipLaunchKernelGGL(( grad_mean_kernel<{{ "scalar_t, scalar_t" if dense else "cache_t, emb_t" }}>) , dim3(div_round_up((B * T), kMaxThreads / kWarpSize)), dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_accessor, D_offsets .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), offsets .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), grad_output_mean.packed_accessor32< acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 2, RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); grad_output_accessor = grad_output_mean.packed_accessor32< acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 2, RestrictPtrTraits>(); } {% if not dense %} PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && !std::is_same<emb_t, float>::value) { auto gen = at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } {% endif %} {% for kMaxVecsPerThread in range(1, max_embedding_dim // 128 + 1) %} if (max_D <= {{ 128 * kMaxVecsPerThread }}) { // Stay under used_shared_kb of shared memory (V100: 64 KB; A100: 96 KB), BT_block_size must be a power of two. while (BT_block_size * sizeof(acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }} >= used_shared_bytes) { BT_block_size /= 2; } TORCH_CHECK(BT_block_size >= 1); if (std::is_same<{{ "scalar_t" if dense else "emb_t" }}, double>::value) { // Otherwise we see CUDA kernel launch failures despite the above checks. BT_block_size = 1; } auto long_run_ids = at::empty_like(sorted_linear_indices_run_lengths); auto num_long_run_ids = at::zeros({1}, indices.options().dtype(kLong)); split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }hipLaunchKernelGGL((}_find_long_segments), dim3(div_round_up(sorted_linear_indices_run_lengths.numel(), kMaxThreads)), dim3(kMaxThreads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() , sorted_linear_indices_num_runs.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run_lengths.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), long_run_ids.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), num_long_run_ids.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), max_segment_length_per_warp); C10_HIP_KERNEL_LAUNCH_CHECK(); // Check https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory-7-x // "Compute capability 7.x devices allow a single thread block to // address the full capacity of shared memory: 96 KB on Volta, // 64 KB on Turing. Kernels relying on shared memory allocations // over 48 KB per block are architecture-specific, as such they // must use dynamic shared memory (rather than statically sized // arrays) and require an explicit opt-in using hipFuncSetAttribute()". hipFuncSetAttribute( split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1< {% if not dense %} emb_t, cache_t, {% else %} scalar_t, scalar_t, {% endif %} {{ kMaxVecsPerThread }}>, hipFuncAttributeMaxDynamicSharedMemorySize, used_shared_bytes); // V100: 64 KB; A100: 96 KB. 
C10_HIP_KERNEL_LAUNCH_CHECK(); split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }hipLaunchKernelGGL((}_kernel_cta_per_row_1< {% if not dense %} emb_t, cache_t, {% else %} scalar_t, scalar_t, {% endif %} {{ kMaxVecsPerThread }}>) , dim3(div_round_up(linear_indices.numel(), 32 * kWarpSize)), dim3(dim3(kWarpSize, BT_block_size)), BT_block_size * sizeof(acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }}, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_accessor, {% if not dense %} dev_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(), weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% else %} dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), {% endif %} weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), sorted_linear_indices_cumulative_run_lengths .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run_lengths .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), long_run_ids.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), num_long_run_ids.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), infos_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% if not dense %} lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% endif %} {% if weighted %} indice_weights_sorted.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(), {% endif %} {% if not dense %} stochastic_rounding, rng_engine_inputs, {% else %} grad_dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), {% endif %} FixedDivisor(B), {{ args.split_kernel_arg_constructors | join(", ") }}); C10_HIP_KERNEL_LAUNCH_CHECK(); hipFuncSetAttribute( split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1< {% if not dense %} emb_t, cache_t, {% else %} scalar_t, scalar_t, {% endif %} {{ kMaxVecsPerThread }}>, hipFuncAttributeMaxDynamicSharedMemorySize, used_shared_bytes); // V100: 64 KB; A100: 96 KB. 
C10_HIP_KERNEL_LAUNCH_CHECK(); split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }hipLaunchKernelGGL((}_kernel_warp_per_row_1< {% if not dense %} emb_t, cache_t, {% else %} scalar_t, scalar_t, {% endif %} {{ kMaxVecsPerThread }}>) , dim3(div_round_up(linear_indices.numel(), kBackwardMaxThreads / kWarpSize)), dim3(dim3(kWarpSize, kBackwardMaxThreads / kWarpSize)), BT_block_size * sizeof( acc_type< {% if not dense %} cache_t {% else %} scalar_t {% endif %}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }}, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_accessor, {% if not dense %} dev_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(), weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% else %} dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), {% endif %} weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), sorted_linear_indices_cumulative_run_lengths .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run_lengths .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), infos_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% if not dense %} lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% endif %} {% if weighted %} indice_weights_sorted.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(), {% endif %} sorted_linear_indices_num_runs .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), max_segment_length_per_warp, {% if not dense %} stochastic_rounding, rng_engine_inputs, {% else %} grad_dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), {% endif %} FixedDivisor(B), {{ args.split_kernel_arg_constructors | join(", ") }}); C10_HIP_KERNEL_LAUNCH_CHECK(); return; } {% endfor %} })); return {{ "grad_dev_weights" if dense else "" }}; }
10d9747b0c97995286e913549955c1b6ac5c2fa8.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ {% set wdesc = "weighted" if weighted else "unweighted" %} #include "codegen/embedding_backward_template_helpers.cuh" {% if not dense %} constexpr int32_t kCacheLocationMissing = -1; {% endif %} enum { DEVICE = 0, MANAGED = 1, MANAGED_CACHING = 2, }; constexpr size_t kBackwardMaxThreads = 512; using namespace at; using namespace fbgemm_gpu; __global__ void split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_find_long_segments( const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_num_runs, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_run_lengths, PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> long_run_ids, PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> num_long_run_ids, int32_t max_segment_length_per_warp) { const int32_t num_runs = sorted_linear_indices_num_runs[0]; for (auto run_id = blockIdx.x * blockDim.x + threadIdx.x; run_id < num_runs; run_id += blockDim.x * gridDim.x) { if (sorted_linear_indices_run_lengths[run_id] >= max_segment_length_per_warp) { auto long_run_idx = gpuAtomicIncrement(&num_long_run_ids[0]); long_run_ids[long_run_idx] = run_id; } } } template < typename emb_t, typename cache_t, size_t kMaxVecsPerThread> __global__ void __launch_bounds__(kMaxThreads) split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1( const PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits> grad_output, PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights, {% if not dense %} PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> weights_placements, {% endif %} const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> hash_size_cumsum, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> sorted_linear_indices_run, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_cumulative_run_lengths, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_run_lengths, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> long_run_ids, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> num_long_run_ids, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_infos, {% if not dense %} const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_lxu_cache_locations, {% endif %} {% if weighted %} const PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits> sorted_indice_weights, {% endif %} {% if not dense %} bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args, {% else %} PackedTensorAccessor64<cache_t, 1, RestrictPtrTraits> grad_dev_weights, {% endif %} FixedDivisor fd, {{ args.split_kernel_args | join(", ") }}) { int32_t T = D_offsets.size(0) - 1; const int32_t B = grad_output.size(0); const int32_t num_long_runs = num_long_run_ids[0]; for (int32_t long_run_id = blockIdx.x; long_run_id < num_long_runs; long_run_id += gridDim.x) { int32_t current_run_id = long_run_ids[long_run_id]; const int64_t linear_index = 
sorted_linear_indices_run[current_run_id]; const int32_t segment_start = sorted_linear_indices_cumulative_run_lengths[current_run_id]; const int32_t segment_end = sorted_linear_indices_cumulative_run_lengths[current_run_id + 1]; const int32_t SL = segment_end - segment_start; const int32_t warp_id = threadIdx.y; const int32_t lane_id = threadIdx.x; // Note that with shared embedding tables we can have multiple tables // (i.e. different values of `t` sharing the same segment). // const auto info_0 = sorted_infos[segment_start]; int32_t t_0 = fd.Div(info_0); //info_0 / B; int64_t hash_size = hash_size_cumsum[t_0]; int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0]; int64_t idx = linear_index - hash_size; const int32_t SL_per_warp = div_round_up(SL, blockDim.y); const int32_t sl_start = SL_per_warp * warp_id; const int32_t sl_end = min(SL_per_warp * (warp_id + 1), SL); Vec4T<acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread]; for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) { int32_t sl_j = sl + threadIdx.x; int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t b; //= b_t % B; int32_t t; //= b_t / B; fd.DivMod(b_t, &t, &b); int32_t D_start = sl_j < sl_end ? D_offsets[t] : 0; {% if weighted %} acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0; {% endif %} for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) { int32_t b_j = __shfl_sync(0xFFFFFFFF, b, j); int32_t D_start_j = __shfl_sync(0xFFFFFFFF, D_start, j); {% if weighted %} acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j); {% endif %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<acc_type<cache_t, true>> grad_out_vec( &grad_output[b_j][0] + D_start_j + d); {% if weighted %} grad_sum[i].fma_(grad_out_vec, idx_weight_j); {% else %} grad_sum[i].acc.x += grad_out_vec.acc.x; grad_sum[i].acc.y += grad_out_vec.acc.y; grad_sum[i].acc.z += grad_out_vec.acc.z; grad_sum[i].acc.w += grad_out_vec.acc.w; {% endif %} } } } // do shared memory reduction only if we used multiple blocks. 
if (SL > SL_per_warp) { struct SharedMemory<Vec4T<acc_type<cache_t, true>>> smem; Vec4T<acc_type<cache_t, true>>* shared_grad_sums = smem.getPointer(); #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = grad_sum[i]; } __syncthreads(); if (blockDim.y >= 32) { if (warp_id < 16) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 16) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 16) { if (warp_id < 8) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 8) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 8) { if (warp_id < 4) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 4) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (blockDim.y >= 4) { if (warp_id < 2) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 2) * kMaxVecsPerThread * kWarpSize]); } } __syncthreads(); } if (warp_id == 0) { #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { grad_sum[i] = vec4_acc( shared_grad_sums [lane_id + i * kWarpSize + warp_id * kMaxVecsPerThread * kWarpSize], shared_grad_sums [lane_id + i * kWarpSize + (warp_id + 1) * kMaxVecsPerThread * kWarpSize]); } } } if (warp_id == 0) { int64_t weights_offset = weights_offsets[t_0]; {% if not dense %} emb_t* __restrict__ weights{nullptr}; cache_t* __restrict__ cache_weights{nullptr}; int32_t D_emb = D; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } const auto weights_placement = weights_placements[t_0]; if (weights_placement == DEVICE) { weights = &dev_weights[weights_offset + idx * D_emb]; } else { weights = &uvm_weights[weights_offset + idx * D_emb]; } if (weights_placement == MANAGED_CACHING) { int32_t cache_idx = sorted_lxu_cache_locations[segment_start]; if (cache_idx != kCacheLocationMissing) { cache_weights = &lxu_cache_weights[cache_idx][0]; } } {% for tensor in args.split_tensors %} acc_type<cache_t, true>* __restrict__ {{ tensor }}; const auto {{ tensor }}_placement = {{ tensor }}_placements[t_0]; int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0]; if ({{ tensor }}_placement == DEVICE) { 
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset]; } else { {{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset]; } {% endfor %} struct SharedMemory<Vec4T<acc_type<cache_t, true>>> weight_update_buffer; Vec4T<acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer(); auto weight_row_template = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>(weights, cache_weights, D, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), threadIdx.x + current_run_id * blockDim.x, &state); weight_row_template.set_stoc_state(&state); } float2 qparams_template; if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { qparams_template = weight_row_template.load_qparams(); } {{ split_precomputation }} float2 qparams_new; #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template); auto& grad = grad_sum[i]; {{ split_weight_update }} if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { shared_weight_update_row[lane_id + i * kWarpSize] = weight_new; } else { weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8 } } if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { // calculate qparams from updated weight row qparams_new = thrust_find_qparams<acc_type<cache_t, true>>(shared_weight_update_row, D); weight_row_template.store_qparams(qparams_new); #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; weight_row_template.store(shared_weight_update_row[lane_id + i * kWarpSize], d, qparams_new); } } {% else %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; auto& grad = grad_sum[i]; grad.store(&grad_dev_weights[weights_offset + idx * D + d]); } {% endif %} } } } template < typename emb_t, typename cache_t, size_t kMaxVecsPerThread> __global__ __launch_bounds__(kBackwardMaxThreads) void split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1( const PackedTensorAccessor32<acc_type<cache_t,true>, 2, RestrictPtrTraits> grad_output, PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights, {% if not dense %} PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights, PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> weights_placements, {% endif %} const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> hash_size_cumsum, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> sorted_linear_indices_run, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_cumulative_run_lengths, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_run_lengths, const 
PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_infos, {% if not dense %} const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_lxu_cache_locations, {% endif %} {% if weighted %} const PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits> sorted_indice_weights, {% endif %} const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_linear_indices_num_runs, int32_t max_segment_length_per_warp, {% if not dense %} bool stochastic_rounding, PhiloxCudaState stochastic_rounding_philox_args, {% else %} PackedTensorAccessor64<cache_t, 1, RestrictPtrTraits> grad_dev_weights, {% endif %} FixedDivisor fd, {{ args.split_kernel_args | join(", ") }}) { const int32_t T = D_offsets.size(0) - 1; const int32_t B = grad_output.size(0); const int32_t run_id = blockIdx.x * blockDim.y + threadIdx.y; if (run_id >= sorted_linear_indices_run.size(0)) { return; } if (run_id >= sorted_linear_indices_num_runs[0]) { return; } const int64_t linear_index = sorted_linear_indices_run[run_id]; const int32_t segment_start = sorted_linear_indices_cumulative_run_lengths[run_id]; const int32_t segment_end = sorted_linear_indices_cumulative_run_lengths[run_id + 1]; const int32_t SL = segment_end - segment_start; if (SL >= max_segment_length_per_warp) { return; } // now, each segment corresponds to exactly one table `t` and row in // that table (`idx`). Thus, we can hoist out some of the book-keeping. const auto info_0 = sorted_infos[segment_start]; int32_t t_0 = fd.Div(info_0); // info_0 / B; int64_t hash_size = hash_size_cumsum[t_0]; int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0]; int64_t idx = linear_index - hash_size; const int32_t SL_per_warp = div_round_up(SL, blockDim.y); const int32_t sl_start = 0; const int32_t sl_end = SL; Vec4T<acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread]; for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) { int32_t sl_j = sl + threadIdx.x; int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0; int32_t b; //= b_t % B; int32_t t; //= b_t / B; fd.DivMod(b_t, &t, &b); int32_t D_start = D_offsets[t]; {% if weighted %} acc_type<cache_t, true> idx_weight = sl_j < sl_end ? 
sorted_indice_weights[segment_start + sl_j] : 0.0; {% endif %} for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) { int32_t b_j = __shfl_sync(0xFFFFFFFF, b, j); int32_t D_start_j = __shfl_sync(0xFFFFFFFF, D_start, j); {% if weighted %} acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j); {% endif %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<acc_type<cache_t, true>> grad_out_vec( &grad_output[b_j][0] + D_start_j + d); {% if weighted %} grad_sum[i].fma_(grad_out_vec, idx_weight_j); {% else %} grad_sum[i].acc.x += grad_out_vec.acc.x; grad_sum[i].acc.y += grad_out_vec.acc.y; grad_sum[i].acc.z += grad_out_vec.acc.z; grad_sum[i].acc.w += grad_out_vec.acc.w; {% endif %} } } } int64_t weights_offset = weights_offsets[t_0]; {% if not dense %} emb_t* __restrict__ weights{nullptr}; cache_t* __restrict__ cache_weights{nullptr}; int32_t D_emb = D; if (std::is_same<emb_t, uint8_t>::value) { D_emb += kINT8QparamsBytes; } const auto weights_placement = weights_placements[t_0]; if (weights_placement == DEVICE) { weights = &dev_weights[weights_offset + idx * D_emb]; } else { weights = &uvm_weights[weights_offset + idx * D_emb]; } if (weights_placement == MANAGED_CACHING) { int32_t cache_idx = sorted_lxu_cache_locations[segment_start]; if (cache_idx != kCacheLocationMissing) { cache_weights = &lxu_cache_weights[cache_idx][0]; } } {% for tensor in args.split_tensors %} acc_type<cache_t, true>* __restrict__ {{ tensor }}; const auto {{ tensor }}_placement = {{ tensor }}_placements[t_0]; int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0]; if ({{ tensor }}_placement == DEVICE) { {{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset]; } else { {{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset]; } {% endfor %} struct SharedMemory<Vec4T<acc_type<cache_t, true>>> weight_update_buffer; Vec4T<acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer(); auto weight_row_template = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>(weights, cache_weights, D, nullptr); if (!std::is_same<emb_t, float>::value && stochastic_rounding) { StochasticRoundingRNGState state; // different for every *run* and every *thread*. 
auto stochastic_rounding_seeds = at::cuda::philox::unpack(stochastic_rounding_philox_args); stochastic_rounding_init( std::get<0>(stochastic_rounding_seeds) ^ std::get<1>(stochastic_rounding_seeds), threadIdx.x + run_id * blockDim.x, &state); weight_row_template.set_stoc_state(&state); } float2 qparams_template; if (std::is_same<emb_t, uint8_t>::value && !cache_weights){ qparams_template = weight_row_template.load_qparams(); } {{ split_precomputation }} float2 qparams_new; #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; Vec4T<acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template); auto& grad = grad_sum[i]; {{ split_weight_update }} if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize] = weight_new; } else { weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if type is not int8 } } if (std::is_same<emb_t, uint8_t>::value && !cache_weights) { // calculate new qparams after row update qparams_new = thrust_find_qparams<acc_type<cache_t, true>>(&shared_weight_update_row[threadIdx.y * kMaxVecsPerThread * kWarpSize], D); weight_row_template.store_qparams(qparams_new); // fetch cached updated row from shared mem and quantize on-the-fly when saving to lowp embedding #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; weight_row_template.store(shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize], d, qparams_new); } } {% else %} #pragma unroll kMaxVecsPerThread for (int32_t i = 0; i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D; ++i) { int32_t d = 4 * kWarpSize * i + threadIdx.x * 4; auto& grad = grad_sum[i]; grad.store(&grad_dev_weights[weights_offset + idx * D + d]); } {% endif %} } template <typename cache_t, typename emb_t> __global__ void __launch_bounds__(kMaxThreads) grad_mean_kernel( const PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits> grad_output, const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets, const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> offsets, PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits> grad_output_mean) { int32_t B = grad_output.size(0); int32_t T = D_offsets.size(0) - 1; int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y; int32_t b = b_t % B; int32_t t = b_t / B; if (b_t >= B * T) { return; } int32_t D_start = D_offsets[t]; int32_t D_end = D_offsets[t + 1]; int32_t D = D_end - D_start; int64_t indices_start = offsets[t * B + b]; int64_t indices_end = offsets[t * B + b + 1]; int32_t L = indices_end - indices_start; if (L != 0) { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<acc_type<cache_t, true>> grad_out_vec(&grad_output[b][D_start + d * 4]); grad_out_vec.acc.x /= L; grad_out_vec.acc.y /= L; grad_out_vec.acc.z /= L; grad_out_vec.acc.w /= L; grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]); } } else { for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) { Vec4T<acc_type<cache_t, true>> grad_out_vec(&grad_output[b][D_start + d * 4]); grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]); } } } {{ "void" if not dense else "Tensor" }} split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc 
}}_exact_cuda( Tensor grad_output, Tensor dev_weights, {% if not dense %} Tensor uvm_weights, Tensor lxu_cache_weights, Tensor weights_placements, {% endif %} Tensor weights_offsets, Tensor D_offsets, int64_t max_D, Tensor hash_size_cumsum, int64_t total_hash_size_bits, Tensor indices, Tensor offsets, int64_t pooling_mode, {% if weighted %} Tensor indice_weights, {% endif %} {% if not dense %} Tensor lxu_cache_locations, {% endif %} int64_t unused_, int64_t max_segment_length_per_warp, {% if not dense %} bool stochastic_rounding, {% endif %} {{ args.split_function_args | join(", ") }}) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(dev_weights.get_device()); {% if dense %} auto grad_dev_weights = zeros_like(dev_weights); {% endif %} // short-circuit if there are zero indices. if (indices.numel() == 0) { return {{ "grad_dev_weights" if dense else "" }}; } int32_t T = D_offsets.numel() - 1; TORCH_CHECK(T > 0); // offsets = [B x T + 1] const auto B = (offsets.size(0) - 1) / T; TORCH_CHECK(B > 0); auto BT_block_size = kMaxThreads / kWarpSize; TORCH_CHECK(BT_block_size * kWarpSize <= kMaxThreads); TORCH_CHECK(max_D <= {{ max_embedding_dim }}); // V100: 96 KB; A100: 160 KB. int max_shared_bytes = 0; cudaDeviceGetAttribute(&max_shared_bytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, dev_weights.get_device()); C10_CUDA_KERNEL_LAUNCH_CHECK(); int shared_kb = max_shared_bytes >> 10; // V100: 64 KB; A100: 96 KB. // Use 2/3 of the available GPU shared mem; leave rooms for L1$. int used_shared_kb = round_down(shared_kb * 2 / 3, 16); TORCH_CHECK(used_shared_kb > 0); int used_shared_bytes = used_shared_kb << 10; auto infos = at::empty_like(indices, indices.options().dtype(kInt)); auto infos_sorted = at::empty_like(infos); auto linear_indices = at::empty_like(indices); auto linear_indices_sorted = at::empty_like(indices); linearize_index_kernel<<< div_round_up(B * T, kMaxThreads), kMaxThreads, 0, at::cuda::getCurrentCUDAStream()>>>( hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), infos.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), linear_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), infos.data_ptr<int32_t>(), infos_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::cuda::getCurrentCUDAStream(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(kByte)); AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), infos.data_ptr<int32_t>(), infos_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::cuda::getCurrentCUDAStream(), false)); } {% if not dense %} auto lxu_cache_locations_sorted = at::empty_like(lxu_cache_locations); if (lxu_cache_locations.size(0) > 0) { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), lxu_cache_locations.data_ptr<int32_t>(), lxu_cache_locations_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, 
at::cuda::getCurrentCUDAStream(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(kByte)); AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), lxu_cache_locations.data_ptr<int32_t>(), lxu_cache_locations_sorted.data_ptr<int32_t>(), linear_indices.numel(), 0, total_hash_size_bits, at::cuda::getCurrentCUDAStream(), false)); } {% endif %} auto sorted_linear_indices_run = at::empty_like(indices); auto sorted_linear_indices_run_lengths = at::zeros_like(indices, indices.options().dtype(kInt)); auto sorted_linear_indices_num_runs = at::zeros({1}, indices.options().dtype(kInt)); { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(cub::DeviceRunLengthEncode::Encode( nullptr, temp_storage_bytes, linear_indices_sorted.data_ptr<int64_t>(), sorted_linear_indices_run.data_ptr<int64_t>(), sorted_linear_indices_run_lengths.data_ptr<int32_t>(), sorted_linear_indices_num_runs.data_ptr<int32_t>(), linear_indices_sorted.numel(), at::cuda::getCurrentCUDAStream())); // Allocate temporary storage auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(kByte)); // Run encoding AT_CUDA_CHECK(cub::DeviceRunLengthEncode::Encode( temp_storage.data_ptr(), temp_storage_bytes, linear_indices_sorted.data_ptr<int64_t>(), sorted_linear_indices_run.data_ptr<int64_t>(), sorted_linear_indices_run_lengths.data_ptr<int32_t>(), sorted_linear_indices_num_runs.data_ptr<int32_t>(), linear_indices_sorted.numel(), at::cuda::getCurrentCUDAStream())); } auto sorted_linear_indices_cumulative_run_lengths = asynchronous_complete_cumsum(sorted_linear_indices_run_lengths); {% if not dense %} DISPATCH_EMB_CACHE_TYPES( {% else %} AT_DISPATCH_FLOATING_TYPES_AND_HALF( {% endif %} dev_weights.type(), {% if not dense %} lxu_cache_weights.type(), {% endif %} "split_embedding_backward_{{ optimizer }}_exact_kernel", ([&] { {% if weighted %} auto indice_weights_sorted = at::empty_like(indice_weights); { size_t temp_storage_bytes = 0; AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), {% if not dense %} indice_weights.data_ptr<acc_type<cache_t, true>>(), indice_weights_sorted.data_ptr<acc_type<cache_t, true>>(), {% else %} indice_weights.data_ptr<acc_type<scalar_t, true>>(), indice_weights_sorted.data_ptr<acc_type<scalar_t, true>>(), {% endif %} linear_indices.numel(), 0, total_hash_size_bits, at::cuda::getCurrentCUDAStream(), false)); auto temp_storage = at::empty( {static_cast<int64_t>(temp_storage_bytes)}, indices.options().dtype(kByte)); AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs( temp_storage.data_ptr(), temp_storage_bytes, linear_indices.data_ptr<int64_t>(), linear_indices_sorted.data_ptr<int64_t>(), {% if not dense %} indice_weights.data_ptr<acc_type<cache_t, true>>(), indice_weights_sorted.data_ptr<acc_type<cache_t, true>>(), {% else %} indice_weights.data_ptr<acc_type<scalar_t, true>>(), indice_weights_sorted.data_ptr<acc_type<scalar_t, true>>(), {% endif %} linear_indices.numel(), 0, total_hash_size_bits, at::cuda::getCurrentCUDAStream(), false)); } {% endif %} auto grad_output_accessor = grad_output.packed_accessor32< acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 2, RestrictPtrTraits>(); Tensor grad_output_mean; if (pooling_mode == MEAN) { grad_output_mean = at::empty_like(grad_output); grad_mean_kernel<{{ 
"scalar_t, scalar_t" if dense else "cache_t, emb_t" }}> <<<div_round_up((B * T), kMaxThreads / kWarpSize), dim3(kWarpSize, kMaxThreads / kWarpSize), 0, at::cuda::getCurrentCUDAStream()>>>( grad_output_accessor, D_offsets .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), offsets .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), grad_output_mean.packed_accessor32< acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 2, RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); grad_output_accessor = grad_output_mean.packed_accessor32< acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 2, RestrictPtrTraits>(); } {% if not dense %} PhiloxCudaState rng_engine_inputs; if (stochastic_rounding && !std::is_same<emb_t, float>::value) { auto gen = at::cuda::detail::getDefaultCUDAGenerator(); std::lock_guard<std::mutex> lock(gen.mutex()); rng_engine_inputs = at::check_generator<at::CUDAGeneratorImpl>(gen) ->philox_cuda_state(4); } {% endif %} {% for kMaxVecsPerThread in range(1, max_embedding_dim // 128 + 1) %} if (max_D <= {{ 128 * kMaxVecsPerThread }}) { // Stay under used_shared_kb of shared memory (V100: 64 KB; A100: 96 KB), BT_block_size must be a power of two. while (BT_block_size * sizeof(acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }} >= used_shared_bytes) { BT_block_size /= 2; } TORCH_CHECK(BT_block_size >= 1); if (std::is_same<{{ "scalar_t" if dense else "emb_t" }}, double>::value) { // Otherwise we see CUDA kernel launch failures despite the above checks. BT_block_size = 1; } auto long_run_ids = at::empty_like(sorted_linear_indices_run_lengths); auto num_long_run_ids = at::zeros({1}, indices.options().dtype(kLong)); split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_find_long_segments<<< div_round_up(sorted_linear_indices_run_lengths.numel(), kMaxThreads), kMaxThreads, 0, at::cuda::getCurrentCUDAStream() >>>( sorted_linear_indices_num_runs.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run_lengths.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), long_run_ids.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), num_long_run_ids.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), max_segment_length_per_warp); C10_CUDA_KERNEL_LAUNCH_CHECK(); // Check https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory-7-x // "Compute capability 7.x devices allow a single thread block to // address the full capacity of shared memory: 96 KB on Volta, // 64 KB on Turing. Kernels relying on shared memory allocations // over 48 KB per block are architecture-specific, as such they // must use dynamic shared memory (rather than statically sized // arrays) and require an explicit opt-in using cudaFuncSetAttribute()". cudaFuncSetAttribute( split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1< {% if not dense %} emb_t, cache_t, {% else %} scalar_t, scalar_t, {% endif %} {{ kMaxVecsPerThread }}>, cudaFuncAttributeMaxDynamicSharedMemorySize, used_shared_bytes); // V100: 64 KB; A100: 96 KB. 
C10_CUDA_KERNEL_LAUNCH_CHECK(); split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1< {% if not dense %} emb_t, cache_t, {% else %} scalar_t, scalar_t, {% endif %} {{ kMaxVecsPerThread }}> <<<div_round_up(linear_indices.numel(), 32 * kWarpSize), dim3(kWarpSize, BT_block_size), BT_block_size * sizeof(acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }}, at::cuda::getCurrentCUDAStream()>>>( grad_output_accessor, {% if not dense %} dev_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(), weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% else %} dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), {% endif %} weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), sorted_linear_indices_cumulative_run_lengths .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run_lengths .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), long_run_ids.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), num_long_run_ids.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), infos_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% if not dense %} lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% endif %} {% if weighted %} indice_weights_sorted.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(), {% endif %} {% if not dense %} stochastic_rounding, rng_engine_inputs, {% else %} grad_dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), {% endif %} FixedDivisor(B), {{ args.split_kernel_arg_constructors | join(", ") }}); C10_CUDA_KERNEL_LAUNCH_CHECK(); cudaFuncSetAttribute( split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1< {% if not dense %} emb_t, cache_t, {% else %} scalar_t, scalar_t, {% endif %} {{ kMaxVecsPerThread }}>, cudaFuncAttributeMaxDynamicSharedMemorySize, used_shared_bytes); // V100: 64 KB; A100: 96 KB. 
C10_CUDA_KERNEL_LAUNCH_CHECK(); split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1< {% if not dense %} emb_t, cache_t, {% else %} scalar_t, scalar_t, {% endif %} {{ kMaxVecsPerThread }}> <<<div_round_up(linear_indices.numel(), kBackwardMaxThreads / kWarpSize), dim3(kWarpSize, kBackwardMaxThreads / kWarpSize), BT_block_size * sizeof( acc_type< {% if not dense %} cache_t {% else %} scalar_t {% endif %}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }}, at::cuda::getCurrentCUDAStream()>>>( grad_output_accessor, {% if not dense %} dev_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(), lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(), weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% else %} dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), {% endif %} weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run .packed_accessor32<int64_t, 1, RestrictPtrTraits>(), sorted_linear_indices_cumulative_run_lengths .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), sorted_linear_indices_run_lengths .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), infos_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% if not dense %} lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(), {% endif %} {% if weighted %} indice_weights_sorted.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(), {% endif %} sorted_linear_indices_num_runs .packed_accessor32<int32_t, 1, RestrictPtrTraits>(), max_segment_length_per_warp, {% if not dense %} stochastic_rounding, rng_engine_inputs, {% else %} grad_dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), {% endif %} FixedDivisor(B), {{ args.split_kernel_arg_constructors | join(", ") }}); C10_CUDA_KERNEL_LAUNCH_CHECK(); return; } {% endfor %} })); return {{ "grad_dev_weights" if dense else "" }}; }
0ee1495e8a1d106fc378ced4c418418bb8c1689f.hip
// !!! This is a file automatically generated by hipify!!!
/*
1. Parallelize the distance-computation function.
2. Optimize step 1 by removing the repeated memory allocations on the GPU.
3. Parallelize region query and compute every needed result in parallel up front;
   afterwards the results are obtained by simply scanning the array.
*/
#include "dbscan.h"
#include <math.h>
#include <queue>
#include<iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
/*
// calculate eculidean distance of two 2-D points
double euclidean_distance(Point a, Point b)
{
    double x = a.x-b.x;
    double y = a.y-b.y;
    return sqrt(x*x+y*y);
}

// get neighborhood of point p and add it to neighborhood queue
int region_query( vector<Point> &dataset, int p, queue<int> &neighborhood, double eps)
{
    //int count = 0;
    for (int i = 0; i < dataset.size(); i++) {
        //cout << "regin_query" << count++ << endl;
        if(i!=p){
            int dist = euclidean_distance(dataset[p],dataset[i]);
            if ( dist< eps) {
                neighborhood.push(i);
            }
        }
    }
    return (int)neighborhood.size();
}
*/
unsigned int total = 0;
double* dev_nodeX;
double* dev_nodeY;
int* dev_result;
int* dev_p;
int *dev_label,*dev_elementsToAdd,*dev_elementsToOpe;
int *dev_query_size;
int **dev_total_query_result;
double* dev_eps;
int *dev_min_pts;
int *dev_pointer;
int *result;
int datasize;
int** total_query_result;
int *label,*elementsToAdd,*elementsToOpe;
//vector<int> *final_result;
//int queuesize;
/*
__device__ int cal(double *dev_nodeX, double *dev_nodeY, int* dev_p, int* dev_i,double* dev_eps)
{
    int i = *dev_i;
    if (i != *dev_p) {
        //int dist=euclidean_distance(dev_nodeX[i],dev_nodeY[i],dev_nodeX[*dev_p],dev_nodeY[*dev_p]);
        double x = dev_nodeX[i] - dev_nodeX[*dev_p];
        double y = dev_nodeY[i] - dev_nodeY[*dev_p];
        int dist = sqrt(x*x + y*y);
        //if (dist<*dev_eps) printf(" #%d (%.3f, %.3f) -> #%d(%.3f,%.3f) dist is %d\n",i, dev_nodeX[i], dev_nodeY[i], *dev_p, dev_nodeX[*dev_p], dev_nodeY[*dev_p], dist);
        if (dist<*dev_eps) {
            return 1;
        }
    }
    return 0;
}
*/
__global__ void region_query_kernal(double *dev_nodeX, double *dev_nodeY,int* dev_query_size, int* dev_query_target, int *dev_pointer, double* dev_eps)
{
    //printf("???");
    int size = *dev_query_size;
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int index=bid*blockDim.x+tid;
    //printf("%d ah\n",index);
    int i=index/size;                  // row currently being processed (row index)
    int j=index%size;                  // element within that row (column index)
    int target = dev_query_target[i];  // which target element this row queries
    //int value=cal(dev_nodeX,dev_nodeY,&target,&j,dev_eps); // distance between the current element and the target: 1 if within eps, 0 otherwise
    int value=0;
    if (target != j) {
        //int dist=euclidean_distance(dev_nodeX[i],dev_nodeY[i],dev_nodeX[*dev_p],dev_nodeY[*dev_p]);
        double x = dev_nodeX[j] - dev_nodeX[target];
        double y = dev_nodeY[j] - dev_nodeY[target];
        int dist = sqrt(x*x + y*y);
        //if (dist<*dev_eps) printf(" #%d (%.3f, %.3f) -> #%d(%.3f,%.3f) dist is %d\n",i, dev_nodeX[i], dev_nodeY[i], *dev_p, dev_nodeX[*dev_p], dev_nodeY[*dev_p], dist);
        if (dist<*dev_eps) {
            value=1;
        }
    }
    if(value){  // if the current element qualifies, mark it -9999, otherwise mark 1
        //printf("???\n");
        dev_pointer[index]=-9999;
    }
    else{
        dev_pointer[index]=1;
    }
}

void pral_query(int*query_target,int**total_query_result, double eps)
{
    //int *query_target;   // elements to be queried in parallel
    //int **query_result;  // results
    int *pointer;
    //hipError_t cudaStatus;
    int *dev_query_target;
    //int **dev_query_result;
    //hipMalloc((void***)&dev_query_result, datasize*sizeof(int*));
    //query_result=(int**)malloc(datasize*sizeof(int*));
    pointer=(int*)malloc(datasize*datasize*sizeof(int));
    /*
    for(int i=0;i<datasize;i++)
    {
        query_result[i]=dev_pointer+i*datasize;
    }
    */
    //hipMemcpy(dev_query_result, query_result, datasize*sizeof(int*), hipMemcpyHostToDevice);
    hipMalloc((void**)&dev_query_target, datasize*sizeof(int));
    hipMemcpy(dev_query_target, query_target, datasize*sizeof(int), hipMemcpyHostToDevice);
    //printf("step into kernal\n");
    hipLaunchKernelGGL(( region_query_kernal),
        dim3((datasize*datasize+511)/512), dim3(512), 0, 0,
        dev_nodeX, dev_nodeY, dev_query_size, dev_query_target, dev_pointer, dev_eps);
    //printf("step out of kernal\n");
    //hipError_t error = hipGetLastError();
    //printf("CUDA error: %s\n", hipGetErrorString(error));
    hipDeviceSynchronize();
    hipMemcpy(pointer, dev_pointer, datasize*datasize*sizeof(int), hipMemcpyDeviceToHost);
    for(int p=0;p<datasize*datasize;p++)
    {
        int i=p/datasize;
        int j=p%datasize;
        total_query_result[i][j]=pointer[p];
    }
    //free(query_result);
    free(pointer);
    hipFree(dev_query_target);
    //hipFree(dev_query_result);
}

__global__ void expand_cluster_kernal(int *dev_query_size,int *dev_pointer,int* dev_elementsToAdd,int *dev_elementsToOpe,int *dev_label,int *dev_min_pts)
{
    int index=threadIdx.x;
    int size=*dev_query_size;
    int target=dev_elementsToOpe[index];
    int counter=0;
    for(int i=0;i<size;i++)
    {
        int location = target*size+i;
        if(dev_pointer[location]==-9999)
            counter++;
    }
    if(counter>=*dev_min_pts-1)
    {
        for(int i=0;i<size;i++)
        {
            int location = target*size+i;
            if(dev_pointer[location]==-9999&&dev_label[i]==-1)
            {
                dev_elementsToAdd[i]=-9999;
            }
        }
    }
}

bool expand_cluster(vector<Point> &dataset, int p, int c, double eps, int min_pts)
{
    queue<int> neighbor_pts;
    //queue<int> neighbor_ope;  // handed to pral_query so it can exclude these points
    dataset[p].lable = c;
    for(int i=0;i<datasize;i++)
    {
        if(total_query_result[p][i]==-9999)
            neighbor_pts.push(i);
    }
    int countOfOpe=0;
    while (!neighbor_pts.empty()) {
        countOfOpe=0;
        while(!neighbor_pts.empty())
        {
            int t=neighbor_pts.front();
            elementsToOpe[countOfOpe++]=t;
            neighbor_pts.pop();
            dataset[t].lable=c;
        }
        for(int i=0;i<datasize;i++)
        {
            label[i]=dataset[i].lable;
        }
        hipMemcpy(dev_elementsToAdd, elementsToAdd, datasize*sizeof(int), hipMemcpyHostToDevice);  // reset the flag array, clearing the previous iteration's marks
        hipMemcpy(dev_elementsToOpe, elementsToOpe, datasize*sizeof(int), hipMemcpyHostToDevice);
        hipMemcpy(dev_label, label, datasize * sizeof(int), hipMemcpyHostToDevice);
        hipLaunchKernelGGL(( expand_cluster_kernal), dim3(1),dim3(countOfOpe), 0, 0, dev_query_size,dev_pointer,dev_elementsToAdd,dev_elementsToOpe,dev_label,dev_min_pts);
        hipMemcpy(elementsToAdd, dev_elementsToAdd, datasize*sizeof(int), hipMemcpyDeviceToHost);
        for(int i=0;i<datasize;i++)
        {
            if(elementsToAdd[i]==-9999)
            {
                neighbor_pts.push(i);
                elementsToAdd[i]=1;
            }
        }
        /*
        int neighbor = neighbor_pts.front();
        queue<int> neighbor_pts1;
        //printf("neighbor is %d\n",neighbor);
        //printf("step into query2\n");
        //region_query(dataset, neighbor, neighbor_pts1, eps);
        //printf("step out of query2\n");
        for(int i=0;i<datasize;i++)
        {
            if(total_query_result[neighbor][i]==-9999)
                neighbor_pts1.push(i);
        }
        if (neighbor_pts1.size() >= min_pts - 1) {
            while (!neighbor_pts1.empty()) {
                int pt = neighbor_pts1.front();
                if (dataset[pt].lable == -1) {
                    neighbor_pts.push(pt);
                }
                neighbor_pts1.pop();
            }
        }
        dataset[neighbor].lable = c;
        neighbor_pts.pop();
        */
    }
    return true;
}

/*
// expand cluster formed by p, which works in a way of bfs.
bool expand_cluster(vector<Point> &dataset, int p, int c, double eps, int min_pts)
{
    queue<int> neighbor_pts;
    //queue<int> neighbor_ope;  // handed to pral_query so it can exclude these points
    dataset[p].lable = c;
    for(int i=0;i<datasize;i++)
    {
        if(total_query_result[p][i]==-9999)
            neighbor_pts.push(i);
    }
    while (!neighbor_pts.empty()) {
        int neighbor = neighbor_pts.front();
        queue<int> neighbor_pts1;
        //printf("neighbor is %d\n",neighbor);
        //printf("step into query2\n");
        //region_query(dataset, neighbor, neighbor_pts1, eps);
        //printf("step out of query2\n");
        for(int i=0;i<datasize;i++)
        {
            if(total_query_result[neighbor][i]==-9999)
                neighbor_pts1.push(i);
        }
        if (neighbor_pts1.size() >= min_pts - 1) {
            while (!neighbor_pts1.empty()) {
                int pt = neighbor_pts1.front();
                if (dataset[pt].lable == -1) {
                    neighbor_pts.push(pt);
                }
                neighbor_pts1.pop();
            }
        }
        dataset[neighbor].lable = c;
        neighbor_pts.pop();
    }
    return true;
}
*/

void allocate_data_init(vector<Point> &dataset, double eps){
    // device memory allocate
    int size = dataset.size();
    datasize=size;
    double *nodeX = (double*)malloc(size * sizeof(double));
    double *nodeY = (double*)malloc(size * sizeof(double));
    for (int i = 0; i<size; i++) {
        nodeX[i] = dataset[i].x;
        nodeY[i] = dataset[i].y;
    }
    hipMalloc((void**)&dev_nodeX, size * sizeof(double));
    hipMalloc((void**)&dev_nodeY, size * sizeof(double));
    hipMalloc((void**)&dev_eps, sizeof(double));
    hipMemcpy(dev_nodeX, nodeX, size * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(dev_nodeY, nodeY, size * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(dev_eps, &eps, sizeof(double), hipMemcpyHostToDevice);
    //hipMalloc((void**)&dev_result, size * sizeof(int));
    //hipMalloc((void**)&dev_p, sizeof(int));
    hipMalloc((void**)&dev_pointer, datasize*datasize*sizeof(int));
    free(nodeX);
    free(nodeY);
    hipMalloc((void**)&dev_query_size, sizeof(int));
    hipMemcpy(dev_query_size, &datasize, sizeof(int), hipMemcpyHostToDevice);
    // host memory allocate
    //result = (int*)malloc(size * sizeof(int));
    //final_result=(vector<int>*)malloc(size*queuesize);
}

void allocate_data_free() {
    // device memory free
    hipFree(dev_nodeX);
    hipFree(dev_nodeY);
    //hipFree(dev_result);
    //hipFree(dev_p);
    hipFree(dev_eps);
    hipFree(dev_pointer);
    hipFree(dev_query_size);
    // host memory free
    //free(result);
    //free(final_result);
}

void allocate_data_init2(vector<Point> &dataset, double eps,int min_pts){
    // device memory allocate
    label = (int*)malloc(datasize * sizeof(int));  // label holds every element's lable
    for (int i = 0; i<datasize; i++) {
        label[i] = dataset[i].lable;
    }
    hipMalloc((void**)&dev_label, datasize * sizeof(int));
    hipMemcpy(dev_label, label, datasize * sizeof(int), hipMemcpyHostToDevice);
    hipMalloc((void**)&dev_min_pts, sizeof(int));
    hipMemcpy(dev_min_pts, &min_pts, sizeof(int), hipMemcpyHostToDevice);
    elementsToAdd = (int*)malloc(datasize*sizeof(int));  // elementsToAdd marks the points to enqueue for expansion in the next round
    for(int i=0;i<datasize;i++)
        elementsToAdd[i]=1;
    hipMalloc((void**)&dev_elementsToAdd, datasize * sizeof(int));
    elementsToOpe = (int*)malloc(datasize*sizeof(int));  // elementsToOpe holds the points to expand in parallel
    hipMalloc((void**)&dev_elementsToOpe, datasize * sizeof(int));
    //hipMalloc((void**)&dev_result, size * sizeof(int));
    //hipMalloc((void**)&dev_p, sizeof(int));
    // host memory allocate
    //result = (int*)malloc(size * sizeof(int));
    //final_result=(vector<int>*)malloc(size*queuesize);
}

void allocate_data_free2() {
    // device memory free
    hipFree(dev_label);
    hipFree(dev_elementsToAdd);
    hipFree(dev_elementsToOpe);
    hipFree(dev_min_pts);
    //hipFree(dev_result);
    //hipFree(dev_p);
    // host memory free
    //free(result);
    free(label);
    free(elementsToAdd);
    free(elementsToOpe);
    //free(final_result);
}
// doing dbscan, given radius and minimum number of neigborhoods.
int dbscan(vector<Point> &dataset, double eps, int min_pts)
{
    int c = 0; // cluster lable
    //int count = 0;
    int p;
    int size=dataset.size();
    int* query_target=(int*) malloc(size*sizeof(int));
    for(int i=0;i<size;i++)
        query_target[i]=i;
    //queuesize=sizeof(query_target);
    allocate_data_init(dataset, eps);  // copy dev_nodeX, dev_nodeY and dev_eps onto the GPU
    allocate_data_init2(dataset, eps, min_pts);
    // prepare the inputs of pral_query: every point is a query target
    total_query_result = (int**)malloc(size*sizeof(int*));
    for(int i=0;i<size;i++)
    {
        total_query_result[i]=(int*)malloc(size*sizeof(int));
    }
    // run every region query in parallel
    //printf("start to pral\n");
    //clock_t start, finish;
    //double duration;
    //start = clock();
    pral_query(query_target,total_query_result, eps);  // fills one row of neighborhood flags per query_target entry
    //finish = clock();
    //duration = (double)(finish - start) / CLOCKS_PER_SEC;
    //cout << "pral time: "<< duration << "s" << endl;
    //printf("finish to pral\n");
    free(query_target);
    for (p = 0; p<size; p++) {
        queue<int> neighborhood;
        //printf("%d query start\n", count++);
        for(int i=0;i<size;i++)
        {
            if(total_query_result[p][i]==-9999)
                neighborhood.push(i);
            //printf("%d done\n",i);
        }
        //printf("%d query end\n", count++);
        //region_query(dataset, p, neighborhood, eps);  // find the eps-neighborhood of p
        //printf("%d query end\n",count);
        if (neighborhood.size() + 1 < min_pts) {  // too few neighbors around p: mark it as noise (lable 0)
            // mark as noise
            //printf("miaomiaomiao?");
            dataset[p].lable = 0;
        }
        else {
            if (dataset[p].lable == -1) {  // p is still unclassified
                c++;
                printf("step into cluster %d\n",p);
                expand_cluster(dataset, p, c, eps, min_pts);
                printf("step out of cluster %d\n",p);
            }
        }
    }
    free(total_query_result);
    allocate_data_free();
    allocate_data_free2();
    return c;
}
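// --------------------------------------------------------------------------
// Illustrative usage sketch (added for clarity; not part of the original file).
// It only relies on what this file already assumes about dbscan.h: Point has
// members x, y and lable, where lable == -1 means "unclassified", 0 means
// "noise", and 1..c are the cluster ids assigned by dbscan() above.
static int dbscan_usage_sketch(vector<Point> &points, double eps, int min_pts)
{
    for (size_t i = 0; i < points.size(); i++)
        points[i].lable = -1;                     // start with every point unclassified
    int clusters = dbscan(points, eps, min_pts);  // one GPU pass precomputes all neighborhoods
    int noise = 0;
    for (size_t i = 0; i < points.size(); i++)
        if (points[i].lable == 0)
            noise++;
    printf("clusters: %d, noise points: %d\n", clusters, noise);
    return clusters;
}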
0ee1495e8a1d106fc378ced4c418418bb8c1689f.cu
/*
1. Parallelize the distance-computation function.
2. Optimize step 1 by removing the repeated memory allocations on the GPU.
3. Parallelize region query and compute every needed result in parallel up front;
   afterwards the results are obtained by simply scanning the array.
*/
#include "dbscan.h"
#include <math.h>
#include <queue>
#include<iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
/*
// calculate eculidean distance of two 2-D points
double euclidean_distance(Point a, Point b)
{
    double x = a.x-b.x;
    double y = a.y-b.y;
    return sqrt(x*x+y*y);
}

// get neighborhood of point p and add it to neighborhood queue
int region_query( vector<Point> &dataset, int p, queue<int> &neighborhood, double eps)
{
    //int count = 0;
    for (int i = 0; i < dataset.size(); i++) {
        //cout << "regin_query" << count++ << endl;
        if(i!=p){
            int dist = euclidean_distance(dataset[p],dataset[i]);
            if ( dist< eps) {
                neighborhood.push(i);
            }
        }
    }
    return (int)neighborhood.size();
}
*/
unsigned int total = 0;
double* dev_nodeX;
double* dev_nodeY;
int* dev_result;
int* dev_p;
int *dev_label,*dev_elementsToAdd,*dev_elementsToOpe;
int *dev_query_size;
int **dev_total_query_result;
double* dev_eps;
int *dev_min_pts;
int *dev_pointer;
int *result;
int datasize;
int** total_query_result;
int *label,*elementsToAdd,*elementsToOpe;
//vector<int> *final_result;
//int queuesize;
/*
__device__ int cal(double *dev_nodeX, double *dev_nodeY, int* dev_p, int* dev_i,double* dev_eps)
{
    int i = *dev_i;
    if (i != *dev_p) {
        //int dist=euclidean_distance(dev_nodeX[i],dev_nodeY[i],dev_nodeX[*dev_p],dev_nodeY[*dev_p]);
        double x = dev_nodeX[i] - dev_nodeX[*dev_p];
        double y = dev_nodeY[i] - dev_nodeY[*dev_p];
        int dist = sqrt(x*x + y*y);
        //if (dist<*dev_eps) printf(" #%d (%.3f, %.3f) -> #%d(%.3f,%.3f) dist is %d\n",i, dev_nodeX[i], dev_nodeY[i], *dev_p, dev_nodeX[*dev_p], dev_nodeY[*dev_p], dist);
        if (dist<*dev_eps) {
            return 1;
        }
    }
    return 0;
}
*/
__global__ void region_query_kernal(double *dev_nodeX, double *dev_nodeY,int* dev_query_size, int* dev_query_target, int *dev_pointer, double* dev_eps)
{
    //printf("???");
    int size = *dev_query_size;
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int index=bid*blockDim.x+tid;
    //printf("%d ah\n",index);
    int i=index/size;                  // row currently being processed (row index)
    int j=index%size;                  // element within that row (column index)
    int target = dev_query_target[i];  // which target element this row queries
    //int value=cal(dev_nodeX,dev_nodeY,&target,&j,dev_eps); // distance between the current element and the target: returns 1 if within eps, 0 otherwise
    int value=0;
    if (target != j) {
        //int dist=euclidean_distance(dev_nodeX[i],dev_nodeY[i],dev_nodeX[*dev_p],dev_nodeY[*dev_p]);
        double x = dev_nodeX[j] - dev_nodeX[target];
        double y = dev_nodeY[j] - dev_nodeY[target];
        int dist = sqrt(x*x + y*y);
        //if (dist<*dev_eps) printf(" #%d (%.3f, %.3f) -> #%d(%.3f,%.3f) dist is %d\n",i, dev_nodeX[i], dev_nodeY[i], *dev_p, dev_nodeX[*dev_p], dev_nodeY[*dev_p], dist);
        if (dist<*dev_eps) {
            value=1;
        }
    }
    if(value){  // if the current element qualifies, mark it -9999, otherwise mark 1
        //printf("???\n");
        dev_pointer[index]=-9999;
    }
    else{
        dev_pointer[index]=1;
    }
}

void pral_query(int*query_target,int**total_query_result, double eps)
{
    //int *query_target;   // elements to be queried in parallel
    //int **query_result;  // results
    int *pointer;
    //cudaError_t cudaStatus;
    int *dev_query_target;
    //int **dev_query_result;
    //cudaMalloc((void***)&dev_query_result, datasize*sizeof(int*));
    //query_result=(int**)malloc(datasize*sizeof(int*));
    pointer=(int*)malloc(datasize*datasize*sizeof(int));
    /*
    for(int i=0;i<datasize;i++)
    {
        query_result[i]=dev_pointer+i*datasize;
    }
    */
    //cudaMemcpy(dev_query_result, query_result, datasize*sizeof(int*), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&dev_query_target, datasize*sizeof(int));
    cudaMemcpy(dev_query_target, query_target, datasize*sizeof(int), cudaMemcpyHostToDevice);
//printf("step into kernal\n"); region_query_kernal<<<(datasize*datasize+511)/512,512>>>(dev_nodeX, dev_nodeY,dev_query_size, dev_query_target, dev_pointer, dev_eps); //printf("step out of kernal\n"); //cudaError_t error = cudaGetLastError(); //printf("CUDA error: %s\n", cudaGetErrorString(error)); cudaThreadSynchronize(); cudaMemcpy(pointer, dev_pointer, datasize*datasize*sizeof(int), cudaMemcpyDeviceToHost); for(int p=0;p<datasize*datasize;p++) { int i=p/datasize; int j=p%datasize; total_query_result[i][j]=pointer[p]; } //free(query_result); free(pointer); cudaFree(dev_query_target); //cudaFree(dev_query_result); } __global__ void expand_cluster_kernal(int *dev_query_size,int *dev_pointer,int* dev_elementsToAdd,int *dev_elementsToOpe,int *dev_label,int *dev_min_pts) { int index=threadIdx.x; int size=*dev_query_size; int target=dev_elementsToOpe[index]; int counter=0; for(int i=0;i<size;i++) { int location = target*size+i; if(dev_pointer[location]==-9999) counter++; } if(counter>=*dev_min_pts-1) { for(int i=0;i<size;i++) { int location = target*size+i; if(dev_pointer[location]==-9999&&dev_label[i]==-1) { dev_elementsToAdd[i]=-9999; } } } } bool expand_cluster(vector<Point> &dataset, int p, int c, double eps, int min_pts) { queue<int> neighbor_pts; //queue<int> neighbor_ope;//提供给pral_query函数供其踢除 dataset[p].lable = c; for(int i=0;i<datasize;i++) { if(total_query_result[p][i]==-9999) neighbor_pts.push(i); } int countOfOpe=0; while (!neighbor_pts.empty()) { countOfOpe=0; while(!neighbor_pts.empty()) { int t=neighbor_pts.front(); elementsToOpe[countOfOpe++]=t; neighbor_pts.pop(); dataset[t].lable=c; } for(int i=0;i<datasize;i++) { label[i]=dataset[i].lable; } cudaMemcpy(dev_elementsToAdd, elementsToAdd, datasize*sizeof(int), cudaMemcpyHostToDevice);//初始化标记数组,清除上次操作的影响; cudaMemcpy(dev_elementsToOpe, elementsToOpe, datasize*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_label, label, datasize * sizeof(int), cudaMemcpyHostToDevice); expand_cluster_kernal<<<1,countOfOpe>>>(dev_query_size,dev_pointer,dev_elementsToAdd,dev_elementsToOpe,dev_label,dev_min_pts); cudaMemcpy(elementsToAdd, dev_elementsToAdd, datasize*sizeof(int), cudaMemcpyDeviceToHost); for(int i=0;i<datasize;i++) { if(elementsToAdd[i]==-9999) { neighbor_pts.push(i); elementsToAdd[i]=1; } } /* int neighbor = neighbor_pts.front(); queue<int> neighbor_pts1; //printf("neighbor is %d\n",neighbor); //printf("step into query2\n"); //region_query(dataset, neighbor, neighbor_pts1, eps); //printf("step out of query2\n"); for(int i=0;i<datasize;i++) { if(total_query_result[neighbor][i]==-9999) neighbor_pts1.push(i); } if (neighbor_pts1.size() >= min_pts - 1) { while (!neighbor_pts1.empty()) { int pt = neighbor_pts1.front(); if (dataset[pt].lable == -1) { neighbor_pts.push(pt); } neighbor_pts1.pop(); } } dataset[neighbor].lable = c; neighbor_pts.pop(); */ } return true; } /* // expand cluster formed by p, which works in a way of bfs. 
bool expand_cluster(vector<Point> &dataset, int p, int c, double eps, int min_pts) { queue<int> neighbor_pts; //queue<int> neighbor_ope;//提供给pral_query函数供其踢除 dataset[p].lable = c; for(int i=0;i<datasize;i++) { if(total_query_result[p][i]==-9999) neighbor_pts.push(i); } while (!neighbor_pts.empty()) { int neighbor = neighbor_pts.front(); queue<int> neighbor_pts1; //printf("neighbor is %d\n",neighbor); //printf("step into query2\n"); //region_query(dataset, neighbor, neighbor_pts1, eps); //printf("step out of query2\n"); for(int i=0;i<datasize;i++) { if(total_query_result[neighbor][i]==-9999) neighbor_pts1.push(i); } if (neighbor_pts1.size() >= min_pts - 1) { while (!neighbor_pts1.empty()) { int pt = neighbor_pts1.front(); if (dataset[pt].lable == -1) { neighbor_pts.push(pt); } neighbor_pts1.pop(); } } dataset[neighbor].lable = c; neighbor_pts.pop(); } return true; } */ void allocate_data_init(vector<Point> &dataset, double eps){ // device memory allocate int size = dataset.size(); datasize=size; double *nodeX = (double*)malloc(size * sizeof(double)); double *nodeY = (double*)malloc(size * sizeof(double)); for (int i = 0; i<size; i++) { nodeX[i] = dataset[i].x; nodeY[i] = dataset[i].y; } cudaMalloc((void**)&dev_nodeX, size * sizeof(double)); cudaMalloc((void**)&dev_nodeY, size * sizeof(double)); cudaMalloc((void**)&dev_eps, sizeof(double)); cudaMemcpy(dev_nodeX, nodeX, size * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_nodeY, nodeY, size * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_eps, &eps, sizeof(double), cudaMemcpyHostToDevice); //cudaMalloc((void**)&dev_result, size * sizeof(int)); //cudaMalloc((void**)&dev_p, sizeof(int)); cudaMalloc((void**)&dev_pointer, datasize*datasize*sizeof(int)); free(nodeX); free(nodeY); cudaMalloc((void**)&dev_query_size, sizeof(int)); cudaMemcpy(dev_query_size, &datasize, sizeof(int), cudaMemcpyHostToDevice); // host memory allocate //result = (int*)malloc(size * sizeof(int)); //final_result=(vector<int>*)malloc(size*queuesize); } void allocate_data_free() { // device memory free cudaFree(dev_nodeX); cudaFree(dev_nodeY); //cudaFree(dev_result); //cudaFree(dev_p); cudaFree(dev_eps); cudaFree(dev_pointer); cudaFree(dev_query_size); // host memory free //free(result); //free(final_result); } void allocate_data_init2(vector<Point> &dataset, double eps,int min_pts){ // device memory allocate label = (int*)malloc(datasize * sizeof(int));//label存放每个元素对应的lable for (int i = 0; i<datasize; i++) { label[i] = dataset[i].lable; } cudaMalloc((void**)&dev_label, datasize * sizeof(int)); cudaMemcpy(dev_label, label, datasize * sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_min_pts, sizeof(int)); cudaMemcpy(dev_min_pts, &min_pts, sizeof(int), cudaMemcpyHostToDevice); elementsToAdd = (int*)malloc(datasize*sizeof(int));//elementsToAdd标记下次放入队列中要进行扩展的点 for(int i=0;i<datasize;i++) elementsToAdd[i]=1; cudaMalloc((void**)&dev_elementsToAdd, datasize * sizeof(int)); elementsToOpe = (int*)malloc(datasize*sizeof(int));//elementsToOpe存放要并行扩展的点 cudaMalloc((void**)&dev_elementsToOpe, datasize * sizeof(int)); //cudaMalloc((void**)&dev_result, size * sizeof(int)); //cudaMalloc((void**)&dev_p, sizeof(int)); // host memory allocate //result = (int*)malloc(size * sizeof(int)); //final_result=(vector<int>*)malloc(size*queuesize); } void allocate_data_free2() { // device memory free cudaFree(dev_label); cudaFree(dev_elementsToAdd); cudaFree(dev_elementsToOpe);cudaFree(dev_min_pts); //cudaFree(dev_result); //cudaFree(dev_p); // host memory free 
//free(result); free(label);free(elementsToAdd);free(elementsToOpe); //free(final_result); } // doing dbscan, given radius and minimum number of neigborhoods. int dbscan(vector<Point> &dataset, double eps, int min_pts) { int c = 0; // cluster lable //int count = 0; int p; int size=dataset.size(); int* query_target=(int*) malloc(size*sizeof(int)); for(int i=0;i<size;i++) query_target[i]=i; //queuesize=sizeof(query_target); allocate_data_init(dataset, eps);//这里已经将dev_nodeX,dev_nodeY,dev_eps拷贝到GPU中 allocate_data_init2(dataset, eps, min_pts); //对于每一次调用pral_query函数,必须把结果数组申请出来,然后把它作为参数传入 //结果数组中的每一行 对应target中的一个元素的邻居 total_query_result = (int**)malloc(size*sizeof(int*)); for(int i=0;i<size;i++) { total_query_result[i]=(int*)malloc(size*sizeof(int)); } //并行求出每个元素的邻居 //printf("start to pral\n"); //clock_t start, finish; //double duration; //start = clock(); pral_query(query_target,total_query_result, eps);//query_target中的元素被踢除 //finish = clock(); //duration = (double)(finish - start) / CLOCKS_PER_SEC; //cout << "pral: "<< duration << "s" << endl; //printf("finish to pral\n"); free(query_target); for (p = 0; p<size; p++) { queue<int> neighborhood; //printf("%d query start\n", count++); for(int i=0;i<size;i++) { if(total_query_result[p][i]==-9999) neighborhood.push(i); //printf("%d done\n",i); } //printf("%d query end\n", count++); //region_query(dataset, p, neighborhood, eps);//找到p的相邻节点,它们之间的距离小于eps,将相邻节点存放到队列中 //printf("%d query end\n",count); if (neighborhood.size() + 1 < min_pts) {//如果p相离节点与其组成团的大小 小于最小的要求,将其标记为0 // mark as noise //printf("miaomiaomiao?"); dataset[p].lable = 0; } else { if (dataset[p].lable == -1) {//否则,如果p没有被分类,则将其相邻节点扩展 c++; printf("step into cluster %d\n",p); expand_cluster(dataset, p, c, eps, min_pts); printf("step out of cluster %d\n",p); } } } free(total_query_result); allocate_data_free(); allocate_data_free2(); return c; }
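
The DBSCAN host code above never checks the status of its CUDA calls; the cudaGetLastError probe after the kernel launch is commented out. Below is a minimal sketch of the usual checking pattern. The CHECK_CUDA macro name and the dummy kernel are illustrative and not part of the original project.

// Hypothetical error-checking helper: every runtime call and kernel launch
// is followed by an explicit status check so failures are not silent.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                      \
                    cudaGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

__global__ void dummy_kernel(int *out) { out[threadIdx.x] = threadIdx.x; }

int main()
{
    int *dev_buf = nullptr;
    CHECK_CUDA(cudaMalloc(&dev_buf, 32 * sizeof(int)));
    dummy_kernel<<<1, 32>>>(dev_buf);
    CHECK_CUDA(cudaGetLastError());       // catches launch-configuration errors
    CHECK_CUDA(cudaDeviceSynchronize());  // catches errors raised during execution
    CHECK_CUDA(cudaFree(dev_buf));
    return 0;
}
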
003842b1bf51061d7c306327b53fc0b687a5c6df.hip
// !!! This is a file automatically generated by hipify!!! /* * EDDL Library - European Distributed Deep Learning Library. * Version: 0.7 * copyright (c) 2020, Universidad Politcnica de Valencia (UPV), PRHLT Research Centre * Date: April 2020 * Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected]) * All rights reserved */ #include <string.h> #include <cstdio> #include <cstdlib> #include <iostream> #include <hip/hip_runtime.h> #include "eddl/hardware/gpu/gpu_kernels.h" __global__ void gpu_abs(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = fabsf(A[thread_id_x]); } } __global__ void gpu_acos(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = acosf(A[thread_id_x]); } } __global__ void gpu_add(float *A, float *B, long int size, float v){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] + v; } } __global__ void gpu_asin(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = asinf(A[thread_id_x]); } } __global__ void gpu_atan(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = atanf(A[thread_id_x]); } } __global__ void gpu_ceil(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = ceilf(A[thread_id_x]); } } __global__ void gpu_clamp(float *A, float *B, long int size, float min, float max){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size) if (A[thread_id_x] < min){ B[thread_id_x] = min; }else if(A[thread_id_x] > max){ B[thread_id_x] = max; }else { B[thread_id_x] = A[thread_id_x]; } } __global__ void gpu_cos(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = cosf(A[thread_id_x]); } } __global__ void gpu_cosh(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = coshf(A[thread_id_x]); } } __global__ void gpu_exp(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = expf(A[thread_id_x]); } } __global__ void gpu_floor(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = floorf(A[thread_id_x]); } } __global__ void gpu_log(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = logf(A[thread_id_x]); } } __global__ void gpu_log2(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = log2f(A[thread_id_x]); } } __global__ void gpu_log10(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = log10f(A[thread_id_x]); } } __global__ void gpu_logn(float *A, float *B, long int size, float n){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = logf(A[thread_id_x])/logf(n); } } __global__ void gpu_mod(float *A, float *B, long int size, float v){ long int thread_id_x = 
threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = fmodf(A[thread_id_x], v); } } __global__ void gpu_inv(float *A, float *B, long int size, float v){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = v/A[thread_id_x]; } } __global__ void gpu_mult(float *A, float *B, long int size, float v){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] * v; } } __global__ void gpu_normalize(float *A, float *B, long int size, float min_ori, float max_ori, float min, float max){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = (max-min)/(max_ori-min_ori) * (A[thread_id_x]-min_ori) + min; } } __global__ void gpu_pow(float *A, float *B, long int size, float exp){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = powf(A[thread_id_x], exp); } } __global__ void gpu_powb(float *A, float *B, long int size, float base){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = powf(base, A[thread_id_x]); } } __global__ void gpu_remainder(float *A, float *B, long int size, float v){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = fmod((v + fmod(A[thread_id_x], v)), v); } } __global__ void gpu_round(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = roundf(A[thread_id_x]); } } __global__ void gpu_rsqrt(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = 1.0f/sqrtf(A[thread_id_x]); } } __global__ void gpu_sigmoid(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = 1.0f/(1.0f + ::expf(-A[thread_id_x])); } } __global__ void gpu_sign(float *A, float *B, long int size, float zero_sign){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ if(A[thread_id_x] > 0.0f){ B[thread_id_x] = 1.0f; }else if(A[thread_id_x] < 0.0f){ B[thread_id_x] = -1.0f; }else{ B[thread_id_x] = zero_sign; } } } __global__ void gpu_sin(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = sinf(A[thread_id_x]); } } __global__ void gpu_sinh(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = sinhf(A[thread_id_x]); } } __global__ void gpu_sqr(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] * A[thread_id_x]; } } __global__ void gpu_sqrt(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = sqrtf(A[thread_id_x]); } } __global__ void gpu_tan(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = tanf(A[thread_id_x]); } } __global__ void gpu_tanh(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = tanhf(A[thread_id_x]); } } __global__ void gpu_trunc(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; 
if (thread_id_x < size){ B[thread_id_x] = truncf(A[thread_id_x]); } } // CPU: Math (static) *************************** __global__ void gpu_add(float scA, float *A, float scB, float *B, float *C, long int incC, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size) { if (incC) C[thread_id_x] += scA * A[thread_id_x] + scB * B[thread_id_x]; else C[thread_id_x] = scA * A[thread_id_x] + scB * B[thread_id_x]; } } __global__ void gpu_el_mult(float *A, float *B, float *C, long int incC, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ if (incC) C[thread_id_x] += A[thread_id_x] * B[thread_id_x]; else C[thread_id_x] = A[thread_id_x] * B[thread_id_x]; } } __global__ void gpu_el_div(float *A, float *B, float *C, long int incC, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ if (incC) C[thread_id_x] += A[thread_id_x]/(B[thread_id_x]); else C[thread_id_x] = A[thread_id_x]/(B[thread_id_x]); } } __global__ void gpu_sum2D_rowwise(float *A, float *B, float *C, long int rows,long int cols){ long int ops=rows*cols; long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < ops){ C[thread_id_x]=A[thread_id_x]+B[thread_id_x%cols]; } } __global__ void gpu_sum2D_colwise(float *A, float *B, float *C, long int rows,long int cols){ long int ops=rows*cols; long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < ops){ C[thread_id_x]=A[thread_id_x]+B[thread_id_x/cols]; } } __global__ void gpu_reduce_sum2D(float *A,float *B,long int rows,long int cols,long int axis){ long int ops=rows*cols; long int thread_id_x = threadIdx.x+(blockDim.x*blockIdx.x); if (thread_id_x < ops){ if (axis==0) atomicAdd(&(B[thread_id_x%cols]),A[thread_id_x]); else atomicAdd(&(B[thread_id_x/cols]),A[thread_id_x]); } } __global__ void gpu_maximum(float* A, float* B, float v, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = max(A[thread_id_x], v); } } __global__ void gpu_maximum(float* A, float* B, float* C, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ C[thread_id_x] = max(A[thread_id_x], B[thread_id_x]); } } __global__ void gpu_minimum(float* A, float* B, float v, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = min(A[thread_id_x], v); } } __global__ void gpu_minimum(float* A, float* B, float* C, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ C[thread_id_x] = min(A[thread_id_x], B[thread_id_x]); } }
003842b1bf51061d7c306327b53fc0b687a5c6df.cu
/* * EDDL Library - European Distributed Deep Learning Library. * Version: 0.7 * copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre * Date: April 2020 * Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected]) * All rights reserved */ #include <string.h> #include <cstdio> #include <cstdlib> #include <iostream> #include <cuda.h> #include "eddl/hardware/gpu/gpu_kernels.h" __global__ void gpu_abs(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = fabsf(A[thread_id_x]); } } __global__ void gpu_acos(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = acosf(A[thread_id_x]); } } __global__ void gpu_add(float *A, float *B, long int size, float v){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] + v; } } __global__ void gpu_asin(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = asinf(A[thread_id_x]); } } __global__ void gpu_atan(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = atanf(A[thread_id_x]); } } __global__ void gpu_ceil(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = ceilf(A[thread_id_x]); } } __global__ void gpu_clamp(float *A, float *B, long int size, float min, float max){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size) if (A[thread_id_x] < min){ B[thread_id_x] = min; }else if(A[thread_id_x] > max){ B[thread_id_x] = max; }else { B[thread_id_x] = A[thread_id_x]; } } __global__ void gpu_cos(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = cosf(A[thread_id_x]); } } __global__ void gpu_cosh(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = coshf(A[thread_id_x]); } } __global__ void gpu_exp(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = expf(A[thread_id_x]); } } __global__ void gpu_floor(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = floorf(A[thread_id_x]); } } __global__ void gpu_log(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = logf(A[thread_id_x]); } } __global__ void gpu_log2(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = log2f(A[thread_id_x]); } } __global__ void gpu_log10(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = log10f(A[thread_id_x]); } } __global__ void gpu_logn(float *A, float *B, long int size, float n){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = logf(A[thread_id_x])/logf(n); } } __global__ void gpu_mod(float *A, float *B, long int size, float v){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ 
B[thread_id_x] = fmodf(A[thread_id_x], v); } } __global__ void gpu_inv(float *A, float *B, long int size, float v){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = v/A[thread_id_x]; } } __global__ void gpu_mult(float *A, float *B, long int size, float v){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] * v; } } __global__ void gpu_normalize(float *A, float *B, long int size, float min_ori, float max_ori, float min, float max){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = (max-min)/(max_ori-min_ori) * (A[thread_id_x]-min_ori) + min; } } __global__ void gpu_pow(float *A, float *B, long int size, float exp){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = powf(A[thread_id_x], exp); } } __global__ void gpu_powb(float *A, float *B, long int size, float base){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = powf(base, A[thread_id_x]); } } __global__ void gpu_remainder(float *A, float *B, long int size, float v){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = fmod((v + fmod(A[thread_id_x], v)), v); } } __global__ void gpu_round(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = roundf(A[thread_id_x]); } } __global__ void gpu_rsqrt(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = 1.0f/sqrtf(A[thread_id_x]); } } __global__ void gpu_sigmoid(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = 1.0f/(1.0f + ::expf(-A[thread_id_x])); } } __global__ void gpu_sign(float *A, float *B, long int size, float zero_sign){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ if(A[thread_id_x] > 0.0f){ B[thread_id_x] = 1.0f; }else if(A[thread_id_x] < 0.0f){ B[thread_id_x] = -1.0f; }else{ B[thread_id_x] = zero_sign; } } } __global__ void gpu_sin(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = sinf(A[thread_id_x]); } } __global__ void gpu_sinh(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = sinhf(A[thread_id_x]); } } __global__ void gpu_sqr(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = A[thread_id_x] * A[thread_id_x]; } } __global__ void gpu_sqrt(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = sqrtf(A[thread_id_x]); } } __global__ void gpu_tan(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = tanf(A[thread_id_x]); } } __global__ void gpu_tanh(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = tanhf(A[thread_id_x]); } } __global__ void gpu_trunc(float *A, float *B, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = 
truncf(A[thread_id_x]); } } // CPU: Math (static) *************************** __global__ void gpu_add(float scA, float *A, float scB, float *B, float *C, long int incC, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size) { if (incC) C[thread_id_x] += scA * A[thread_id_x] + scB * B[thread_id_x]; else C[thread_id_x] = scA * A[thread_id_x] + scB * B[thread_id_x]; } } __global__ void gpu_el_mult(float *A, float *B, float *C, long int incC, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ if (incC) C[thread_id_x] += A[thread_id_x] * B[thread_id_x]; else C[thread_id_x] = A[thread_id_x] * B[thread_id_x]; } } __global__ void gpu_el_div(float *A, float *B, float *C, long int incC, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ if (incC) C[thread_id_x] += A[thread_id_x]/(B[thread_id_x]); else C[thread_id_x] = A[thread_id_x]/(B[thread_id_x]); } } __global__ void gpu_sum2D_rowwise(float *A, float *B, float *C, long int rows,long int cols){ long int ops=rows*cols; long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < ops){ C[thread_id_x]=A[thread_id_x]+B[thread_id_x%cols]; } } __global__ void gpu_sum2D_colwise(float *A, float *B, float *C, long int rows,long int cols){ long int ops=rows*cols; long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < ops){ C[thread_id_x]=A[thread_id_x]+B[thread_id_x/cols]; } } __global__ void gpu_reduce_sum2D(float *A,float *B,long int rows,long int cols,long int axis){ long int ops=rows*cols; long int thread_id_x = threadIdx.x+(blockDim.x*blockIdx.x); if (thread_id_x < ops){ if (axis==0) atomicAdd(&(B[thread_id_x%cols]),A[thread_id_x]); else atomicAdd(&(B[thread_id_x/cols]),A[thread_id_x]); } } __global__ void gpu_maximum(float* A, float* B, float v, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = max(A[thread_id_x], v); } } __global__ void gpu_maximum(float* A, float* B, float* C, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ C[thread_id_x] = max(A[thread_id_x], B[thread_id_x]); } } __global__ void gpu_minimum(float* A, float* B, float v, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ B[thread_id_x] = min(A[thread_id_x], v); } } __global__ void gpu_minimum(float* A, float* B, float* C, long int size){ long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x; if (thread_id_x < size){ C[thread_id_x] = min(A[thread_id_x], B[thread_id_x]); } }
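
The kernels in the pair above are definitions only; their launch configuration lives elsewhere in the EDDL library. The sketch below shows the conventional one-thread-per-element launch for a kernel with the same (float *A, float *B, long int size) signature. The local kernel, the block size of 256 and the wrapper name are illustrative assumptions, not taken from the EDDL sources.

// Illustrative elementwise launch: the grid is rounded up so every element
// of the buffer gets exactly one thread.
#include <cuda_runtime.h>

__global__ void sqrt_like(float *A, float *B, long int size)
{
    long int thread_id_x = threadIdx.x + blockIdx.x * (long int)blockDim.x;
    if (thread_id_x < size) {
        B[thread_id_x] = sqrtf(A[thread_id_x]);
    }
}

void launch_sqrt_like(float *dev_A, float *dev_B, long int size, cudaStream_t stream)
{
    const int threads = 256;                                  // illustrative block size
    const long int blocks = (size + threads - 1) / threads;   // ceiling division to cover the buffer
    sqrt_like<<<(unsigned int)blocks, threads, 0, stream>>>(dev_A, dev_B, size);
}
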
e6fac72a93fadd5ca5e84678c9d820ff256690fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #define ABS(x) ((x) > 0 ? (x) : - (x)) #define MAX(a, b) ((a) > (b) ? (a) : (b)) __global__ void kernel_projection(float *proj, float *img, float angle, float SO, float SD, float da, int na, float ai, float db, int nb, float bi, int nx, int ny, int nz){ int ia = 16 * blockIdx.x + threadIdx.x; int ib = 16 * blockIdx.y + threadIdx.y; if (ia >= na || ib >= nb) return; int id = ia + ib * na; proj[id] = 0.0f; float x1, y1, z1, x2, y2, z2, x20, y20, cphi, sphi; cphi = (float)cosf(angle); sphi = (float)sinf(angle); x1 = -SO * cphi; y1 = -SO * sphi; z1 = 0.0f; x20 = SD - SO; y20 = (ia + ai) * da; // locate the detector cell center before any rotation x2 = x20 * cphi - y20 * sphi; y2 = x20 * sphi + y20 * cphi; z2 = (ib + bi) * db; float x21, y21, z21; // offset between source and detector center x21 = x2 - x1; y21 = y2 - y1; z21 = z2 - z1; // y - z plane, where ABS(x21) > ABS(y21) if (ABS(x21) > ABS(y21)){ // if (ABS(cphi) > ABS(sphi)){ float yi1, yi2, ky1, ky2, zi1, zi2, kz1, kz2; int Yi1, Yi2, Zi1, Zi2; // for each y - z plane, we calculate and add the contribution of related pixels for (int ix = 0; ix < nx; ix++){ // calculate y indices of intersecting voxel candidates ky1 = (y21 - da / 2 * cphi) / (x21 + da / 2 * sphi); yi1 = ky1 * ((float)ix + 0.5f - x1 - nx / 2) + y1 + ny / 2; Yi1 = (int)floor(yi1); // lower boundary of related voxels at y-axis ky2 = (y21 + da / 2 * cphi) / (x21 - da / 2 * sphi); yi2 = ky2 * ((float)ix + 0.5f - x1 - nx / 2) + y1 + ny / 2; Yi2 = (int)floor(yi2); // upper boundary of related voxels at y-axis // if (Yi1 < 0) // Yi1 = 0; // if (Yi2 >= ny) // Yi2 = ny - 1; // calculate z indices of intersecting voxel candidates kz1 = (z21 - db / 2) / x21; zi1 = kz1 * ((float)ix + 0.5f - x1 - nx / 2) + z1 + nz / 2; Zi1 = (int)floor(zi1); // lower boundary of related voxels at y-axis kz2 = (z21 + db / 2) / x21; zi2 = kz2 * ((float)ix + 0.5f - x1 - nx / 2) + z1 + nz / 2; Zi2 = (int)floor(zi2); // upper boundary of related voxels at y-axis // if (Zi1 < 0) // Zi1 = 0; // if (Zi2 >= nz) // Zi2 = nz - 1; // calculate contribution of a voxel to the projection value int iy, iz; float wy1, wy2, wz1, wz2; if (ABS(yi2 - yi1) < 0.01f) continue; if (ABS(zi2 - zi1) < 0.01f) continue; wy1 = (MAX(Yi1, Yi2) - yi1) / (yi2 - yi1); wy2 = 1 - wy1; wz1 = (MAX(Zi1, Zi2) - zi1) / (zi2 - zi1); wz2 = 1 - wz1; // Yi1 == Yi2 && Zi1 == Zi2 if (Yi1 == Yi2 && Zi1 == Zi2) { iy = Yi1; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * 1.0f; continue; } // Yi1 != Yi2 && Zi1 == Zi2 if (Yi1 != Yi2 && Zi1 == Zi2) { iy = Yi1; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy1; iy = Yi2; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy2; continue; } // Yi1 == Yi2 && Zi1 != Zi2 if (Yi1 == Yi2 && Zi1 != Zi2) { iy = Yi1; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wz1; iy = Yi1; iz = Zi2; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wz2; continue; } // Yi1 != Yi2 && Zi1 != Zi2 if (Yi1 != Yi2 && Zi1 != Zi2) { iy = Yi1; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy1 * wz1; iy = Yi1; iz = Zi2; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy1 * wz2; 
iy = Yi2; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy2 * wz1; iy = Yi2; iz = Zi2; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy2 * wz2; continue; } } } // x - z plane, where ABS(x21) <= ABS(y21) else{ float xi1, xi2, kx1, kx2, zi1, zi2, kz1, kz2; int Xi1, Xi2, Zi1, Zi2; // for each y - z plane, we calculate and add the contribution of related pixels for (int iy = 0; iy < ny; iy++){ // calculate y indices of intersecting voxel candidates kx1 = (x21 - da / 2 * sphi) / (y21 + da / 2 * cphi); xi1 = kx1 * ((float)iy + 0.5f - y1 - ny / 2) + x1 + nx / 2; Xi1 = (int)floor(xi1); // lower boundary of related voxels at y-axis kx2 = (x21 + da / 2 * sphi) / (y21 - da / 2 * cphi); xi2 = kx2 * ((float)iy + 0.5f - y1 - ny / 2) + x1 + nx / 2; Xi2 = (int)floor(xi2); // upper boundary of related voxels at y-axis // if (Xi1 < 0) // Xi1 = 0; // if (Xi2 >= ny) // Xi2 = ny - 1; // calculate z indices of intersecting voxel candidates kz1 = (z21 - db / 2) / y21; zi1 = kz1 * ((float)iy + 0.5f - y1 - ny / 2) + z1 + nz / 2; Zi1 = (int)floor(zi1); // lower boundary of related voxels at y-axis kz2 = (z21 + db / 2) / y21; zi2 = kz2 * ((float)iy + 0.5f - y1 - ny / 2) + z1 + nz / 2; Zi2 = (int)floor(zi2); // upper boundary of related voxels at y-axis // if (Zi1 < 0) // Zi1 = 0; // if (Zi2 >= nz) // Zi2 = nz - 1; // calculate contribution of a voxel to the projection value int ix, iz; float wx1, wx2, wz1, wz2; if (ABS(xi2 - xi1) < 0.01f) continue; if (ABS(zi2 - zi1) < 0.01f) continue; wx1 = (MAX(Xi1, Xi2) - xi1) / (xi2 - xi1); wx2 = 1 - wx1; wz1 = (MAX(Zi1, Zi2) - zi1) / (zi2 - zi1); wz2 = 1 - wz1; // Xi1 == Xi2 && Zi1 == Zi2 if (Xi1 == Xi2 && Zi1 == Zi2) { ix = Xi1; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * 1.0f; continue; } // Xi1 != Xi2 && Zi1 == Zi2 if (Xi1 != Xi2 && Zi1 == Zi2) { ix = Xi1; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx1; ix = Xi2; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx2; continue; } // Xi1 == Xi2 && Zi1 != Zi2 if (Xi1 == Xi2 && Zi1 != Zi2) { ix = Xi1; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wz1; ix = Xi1; iz = Zi2; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wz2; continue; } // Xi1 != Xi2 && Zi1 != Zi2 if (Xi1 != Xi2 && Zi1 != Zi2) { ix = Xi1; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx1 * wz1; ix = Xi1; iz = Zi2; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx1 * wz2; ix = Xi2; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx2 * wz1; ix = Xi2; iz = Zi2; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx2 * wz2; continue; } } } }
e6fac72a93fadd5ca5e84678c9d820ff256690fd.cu
#include <math.h> #define ABS(x) ((x) > 0 ? (x) : - (x)) #define MAX(a, b) ((a) > (b) ? (a) : (b)) __global__ void kernel_projection(float *proj, float *img, float angle, float SO, float SD, float da, int na, float ai, float db, int nb, float bi, int nx, int ny, int nz){ int ia = 16 * blockIdx.x + threadIdx.x; int ib = 16 * blockIdx.y + threadIdx.y; if (ia >= na || ib >= nb) return; int id = ia + ib * na; proj[id] = 0.0f; float x1, y1, z1, x2, y2, z2, x20, y20, cphi, sphi; cphi = (float)cosf(angle); sphi = (float)sinf(angle); x1 = -SO * cphi; y1 = -SO * sphi; z1 = 0.0f; x20 = SD - SO; y20 = (ia + ai) * da; // locate the detector cell center before any rotation x2 = x20 * cphi - y20 * sphi; y2 = x20 * sphi + y20 * cphi; z2 = (ib + bi) * db; float x21, y21, z21; // offset between source and detector center x21 = x2 - x1; y21 = y2 - y1; z21 = z2 - z1; // y - z plane, where ABS(x21) > ABS(y21) if (ABS(x21) > ABS(y21)){ // if (ABS(cphi) > ABS(sphi)){ float yi1, yi2, ky1, ky2, zi1, zi2, kz1, kz2; int Yi1, Yi2, Zi1, Zi2; // for each y - z plane, we calculate and add the contribution of related pixels for (int ix = 0; ix < nx; ix++){ // calculate y indices of intersecting voxel candidates ky1 = (y21 - da / 2 * cphi) / (x21 + da / 2 * sphi); yi1 = ky1 * ((float)ix + 0.5f - x1 - nx / 2) + y1 + ny / 2; Yi1 = (int)floor(yi1); // lower boundary of related voxels at y-axis ky2 = (y21 + da / 2 * cphi) / (x21 - da / 2 * sphi); yi2 = ky2 * ((float)ix + 0.5f - x1 - nx / 2) + y1 + ny / 2; Yi2 = (int)floor(yi2); // upper boundary of related voxels at y-axis // if (Yi1 < 0) // Yi1 = 0; // if (Yi2 >= ny) // Yi2 = ny - 1; // calculate z indices of intersecting voxel candidates kz1 = (z21 - db / 2) / x21; zi1 = kz1 * ((float)ix + 0.5f - x1 - nx / 2) + z1 + nz / 2; Zi1 = (int)floor(zi1); // lower boundary of related voxels at y-axis kz2 = (z21 + db / 2) / x21; zi2 = kz2 * ((float)ix + 0.5f - x1 - nx / 2) + z1 + nz / 2; Zi2 = (int)floor(zi2); // upper boundary of related voxels at y-axis // if (Zi1 < 0) // Zi1 = 0; // if (Zi2 >= nz) // Zi2 = nz - 1; // calculate contribution of a voxel to the projection value int iy, iz; float wy1, wy2, wz1, wz2; if (ABS(yi2 - yi1) < 0.01f) continue; if (ABS(zi2 - zi1) < 0.01f) continue; wy1 = (MAX(Yi1, Yi2) - yi1) / (yi2 - yi1); wy2 = 1 - wy1; wz1 = (MAX(Zi1, Zi2) - zi1) / (zi2 - zi1); wz2 = 1 - wz1; // Yi1 == Yi2 && Zi1 == Zi2 if (Yi1 == Yi2 && Zi1 == Zi2) { iy = Yi1; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * 1.0f; continue; } // Yi1 != Yi2 && Zi1 == Zi2 if (Yi1 != Yi2 && Zi1 == Zi2) { iy = Yi1; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy1; iy = Yi2; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy2; continue; } // Yi1 == Yi2 && Zi1 != Zi2 if (Yi1 == Yi2 && Zi1 != Zi2) { iy = Yi1; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wz1; iy = Yi1; iz = Zi2; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wz2; continue; } // Yi1 != Yi2 && Zi1 != Zi2 if (Yi1 != Yi2 && Zi1 != Zi2) { iy = Yi1; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy1 * wz1; iy = Yi1; iz = Zi2; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy1 * wz2; iy = Yi2; iz = Zi1; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + 
iy * nx + iz * nx * ny] * wy2 * wz1; iy = Yi2; iz = Zi2; if (iy < ny && iy >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wy2 * wz2; continue; } } } // x - z plane, where ABS(x21) <= ABS(y21) else{ float xi1, xi2, kx1, kx2, zi1, zi2, kz1, kz2; int Xi1, Xi2, Zi1, Zi2; // for each y - z plane, we calculate and add the contribution of related pixels for (int iy = 0; iy < ny; iy++){ // calculate y indices of intersecting voxel candidates kx1 = (x21 - da / 2 * sphi) / (y21 + da / 2 * cphi); xi1 = kx1 * ((float)iy + 0.5f - y1 - ny / 2) + x1 + nx / 2; Xi1 = (int)floor(xi1); // lower boundary of related voxels at y-axis kx2 = (x21 + da / 2 * sphi) / (y21 - da / 2 * cphi); xi2 = kx2 * ((float)iy + 0.5f - y1 - ny / 2) + x1 + nx / 2; Xi2 = (int)floor(xi2); // upper boundary of related voxels at y-axis // if (Xi1 < 0) // Xi1 = 0; // if (Xi2 >= ny) // Xi2 = ny - 1; // calculate z indices of intersecting voxel candidates kz1 = (z21 - db / 2) / y21; zi1 = kz1 * ((float)iy + 0.5f - y1 - ny / 2) + z1 + nz / 2; Zi1 = (int)floor(zi1); // lower boundary of related voxels at y-axis kz2 = (z21 + db / 2) / y21; zi2 = kz2 * ((float)iy + 0.5f - y1 - ny / 2) + z1 + nz / 2; Zi2 = (int)floor(zi2); // upper boundary of related voxels at y-axis // if (Zi1 < 0) // Zi1 = 0; // if (Zi2 >= nz) // Zi2 = nz - 1; // calculate contribution of a voxel to the projection value int ix, iz; float wx1, wx2, wz1, wz2; if (ABS(xi2 - xi1) < 0.01f) continue; if (ABS(zi2 - zi1) < 0.01f) continue; wx1 = (MAX(Xi1, Xi2) - xi1) / (xi2 - xi1); wx2 = 1 - wx1; wz1 = (MAX(Zi1, Zi2) - zi1) / (zi2 - zi1); wz2 = 1 - wz1; // Xi1 == Xi2 && Zi1 == Zi2 if (Xi1 == Xi2 && Zi1 == Zi2) { ix = Xi1; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * 1.0f; continue; } // Xi1 != Xi2 && Zi1 == Zi2 if (Xi1 != Xi2 && Zi1 == Zi2) { ix = Xi1; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx1; ix = Xi2; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx2; continue; } // Xi1 == Xi2 && Zi1 != Zi2 if (Xi1 == Xi2 && Zi1 != Zi2) { ix = Xi1; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wz1; ix = Xi1; iz = Zi2; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wz2; continue; } // Xi1 != Xi2 && Zi1 != Zi2 if (Xi1 != Xi2 && Zi1 != Zi2) { ix = Xi1; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx1 * wz1; ix = Xi1; iz = Zi2; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx1 * wz2; ix = Xi2; iz = Zi1; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx2 * wz1; ix = Xi2; iz = Zi2; if (ix < nx && ix >= 0 && iz < nz && iz >= 0) proj[id] += img[ix + iy * nx + iz * nx * ny] * wx2 * wz2; continue; } } } }
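
The projection kernel above hard-codes a 16x16 thread block in its index arithmetic (ia = 16 * blockIdx.x + threadIdx.x, ib = 16 * blockIdx.y + threadIdx.y), so the host side has to launch it with blockDim = (16, 16) and a grid that covers the na x nb detector. The wrapper below is a sketch of that pairing; only the block and grid shape follow from the kernel, the wrapper name is hypothetical and the declaration simply repeats the kernel signature from the file above.

// Host-side launch sketch matching the kernel's 16x16 indexing.
#include <cuda_runtime.h>

__global__ void kernel_projection(float *proj, float *img, float angle,
                                  float SO, float SD, float da, int na,
                                  float ai, float db, int nb, float bi,
                                  int nx, int ny, int nz);

void project_one_view(float *dev_proj, float *dev_img, float angle, float SO,
                      float SD, float da, int na, float ai, float db, int nb,
                      float bi, int nx, int ny, int nz)
{
    dim3 block(16, 16);                          // must match the hard-coded 16s in the kernel
    dim3 grid((na + 15) / 16, (nb + 15) / 16);   // one thread per detector cell (ia, ib)
    kernel_projection<<<grid, block>>>(dev_proj, dev_img, angle, SO, SD, da,
                                       na, ai, db, nb, bi, nx, ny, nz);
}
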
97d22a3ba63694ce85c954d089f4ff2962743597.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* This sample queries the properties of the CUDA devices present in the system. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> // includes, project //#include <cutil_inline.h> //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { int deviceCount; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) printf("There is no device supporting CUDA\n"); int dev; for (dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) printf("There is no device supporting CUDA.\n"); else if (deviceCount == 1) printf("There is 1 device supporting CUDA\n"); else printf("There are %d devices supporting CUDA\n", deviceCount); } printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); printf(" Major revision number: %d\n", deviceProp.major); printf(" Minor revision number: %d\n", deviceProp.minor); printf(" Total amount of global memory: %u bytes\n", (unsigned int)deviceProp.totalGlobalMem); #if CUDART_VERSION >= 2000 printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount); printf(" Number of cores: %d\n", 8 * deviceProp.multiProcessorCount); #endif printf(" Total amount of constant memory: %u bytes\n", (unsigned int)deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %u bytes\n", (unsigned int)deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %u bytes\n", (unsigned int)deviceProp.memPitch); printf(" Texture alignment: %u bytes\n", (unsigned int)deviceProp.textureAlignment); printf(" Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 2000 printf(" Concurrent copy and execution: %s\n", deviceProp.deviceOverlap ? "Yes" : "No"); #endif } printf("\nTEST PASSED\n"); }
97d22a3ba63694ce85c954d089f4ff2962743597.cu
/* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* This sample queries the properties of the CUDA devices present in the system. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> // includes, project //#include <cutil_inline.h> //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { int deviceCount; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) printf("There is no device supporting CUDA\n"); int dev; for (dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) printf("There is no device supporting CUDA.\n"); else if (deviceCount == 1) printf("There is 1 device supporting CUDA\n"); else printf("There are %d devices supporting CUDA\n", deviceCount); } printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); printf(" Major revision number: %d\n", deviceProp.major); printf(" Minor revision number: %d\n", deviceProp.minor); printf(" Total amount of global memory: %u bytes\n", (unsigned int)deviceProp.totalGlobalMem); #if CUDART_VERSION >= 2000 printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount); printf(" Number of cores: %d\n", 8 * deviceProp.multiProcessorCount); #endif printf(" Total amount of constant memory: %u bytes\n", (unsigned int)deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %u bytes\n", (unsigned int)deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %u bytes\n", (unsigned int)deviceProp.memPitch); printf(" Texture alignment: %u bytes\n", (unsigned int)deviceProp.textureAlignment); printf(" Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 2000 printf(" Concurrent copy and execution: %s\n", deviceProp.deviceOverlap ? "Yes" : "No"); #endif } printf("\nTEST PASSED\n"); }
27739cb340d2e858cb4ecba93c7474fbc9ffb589.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <linalg/reduce_cols_by_key.cuh> #include <random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename T> void naiveReduceColsByKey(const T *in, const uint32_t *keys, T *out_ref, uint32_t nrows, uint32_t ncols, uint32_t nkeys, hipStream_t stream) { std::vector<uint32_t> h_keys(ncols, 0u); raft::copy(&(h_keys[0]), keys, ncols, stream); std::vector<T> h_in(nrows * ncols); raft::copy(&(h_in[0]), in, nrows * ncols, stream); CUDA_CHECK(hipStreamSynchronize(stream)); std::vector<T> out(nrows * nkeys, T(0)); for (uint32_t i = 0; i < nrows; ++i) { for (uint32_t j = 0; j < ncols; ++j) { out[i * nkeys + h_keys[j]] += h_in[i * ncols + j]; } } raft::copy(out_ref, &(out[0]), nrows * nkeys, stream); CUDA_CHECK(hipStreamSynchronize(stream)); } template <typename T> struct ReduceColsInputs { T tolerance; uint32_t rows; uint32_t cols; uint32_t nkeys; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const ReduceColsInputs<T> &dims) { return os; } template <typename T> class ReduceColsTest : public ::testing::TestWithParam<ReduceColsInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<ReduceColsInputs<T>>::GetParam(); raft::random::Rng r(params.seed); CUDA_CHECK(hipStreamCreate(&stream)); auto nrows = params.rows; auto ncols = params.cols; auto nkeys = params.nkeys; raft::allocate(in, nrows * ncols); raft::allocate(keys, ncols); raft::allocate(out_ref, nrows * nkeys); raft::allocate(out, nrows * nkeys); r.uniform(in, nrows * ncols, T(-1.0), T(1.0), stream); r.uniformInt(keys, ncols, 0u, params.nkeys, stream); naiveReduceColsByKey(in, keys, out_ref, nrows, ncols, nkeys, stream); reduce_cols_by_key(in, keys, out, nrows, ncols, nkeys, stream); CUDA_CHECK(hipStreamSynchronize(stream)); } void TearDown() override { CUDA_CHECK(hipFree(in)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); CUDA_CHECK(hipFree(keys)); CUDA_CHECK(hipStreamDestroy(stream)); } protected: hipStream_t stream; ReduceColsInputs<T> params; T *in, *out_ref, *out; uint32_t *keys; }; const std::vector<ReduceColsInputs<float>> inputsf = { {0.0001f, 128, 32, 6, 1234ULL}, {0.0005f, 121, 63, 10, 1234ULL}}; typedef ReduceColsTest<float> ReduceColsTestF; TEST_P(ReduceColsTestF, Result) { ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.rows * params.nkeys, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceColsTests, ReduceColsTestF, ::testing::ValuesIn(inputsf)); const std::vector<ReduceColsInputs<double>> inputsd2 = { {0.0000001, 128, 32, 6, 1234ULL}, {0.0000001, 121, 63, 10, 1234ULL}}; typedef ReduceColsTest<double> ReduceColsTestD; TEST_P(ReduceColsTestD, Result) { ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.rows * params.nkeys, raft::CompareApprox<double>(params.tolerance))); } 
INSTANTIATE_TEST_CASE_P(ReduceColsTests, ReduceColsTestD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
27739cb340d2e858cb4ecba93c7474fbc9ffb589.cu
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/reduce_cols_by_key.cuh>
#include <random/rng.cuh>
#include "test_utils.h"

namespace MLCommon {
namespace LinAlg {

template <typename T>
void naiveReduceColsByKey(const T *in, const uint32_t *keys, T *out_ref,
                          uint32_t nrows, uint32_t ncols, uint32_t nkeys,
                          cudaStream_t stream) {
  std::vector<uint32_t> h_keys(ncols, 0u);
  raft::copy(&(h_keys[0]), keys, ncols, stream);
  std::vector<T> h_in(nrows * ncols);
  raft::copy(&(h_in[0]), in, nrows * ncols, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
  std::vector<T> out(nrows * nkeys, T(0));
  for (uint32_t i = 0; i < nrows; ++i) {
    for (uint32_t j = 0; j < ncols; ++j) {
      out[i * nkeys + h_keys[j]] += h_in[i * ncols + j];
    }
  }
  raft::copy(out_ref, &(out[0]), nrows * nkeys, stream);
  CUDA_CHECK(cudaStreamSynchronize(stream));
}

template <typename T>
struct ReduceColsInputs {
  T tolerance;
  uint32_t rows;
  uint32_t cols;
  uint32_t nkeys;
  unsigned long long int seed;
};

template <typename T>
::std::ostream &operator<<(::std::ostream &os, const ReduceColsInputs<T> &dims) {
  return os;
}

template <typename T>
class ReduceColsTest : public ::testing::TestWithParam<ReduceColsInputs<T>> {
 protected:
  void SetUp() override {
    params = ::testing::TestWithParam<ReduceColsInputs<T>>::GetParam();
    raft::random::Rng r(params.seed);
    CUDA_CHECK(cudaStreamCreate(&stream));
    auto nrows = params.rows;
    auto ncols = params.cols;
    auto nkeys = params.nkeys;
    raft::allocate(in, nrows * ncols);
    raft::allocate(keys, ncols);
    raft::allocate(out_ref, nrows * nkeys);
    raft::allocate(out, nrows * nkeys);
    r.uniform(in, nrows * ncols, T(-1.0), T(1.0), stream);
    r.uniformInt(keys, ncols, 0u, params.nkeys, stream);
    naiveReduceColsByKey(in, keys, out_ref, nrows, ncols, nkeys, stream);
    reduce_cols_by_key(in, keys, out, nrows, ncols, nkeys, stream);
    CUDA_CHECK(cudaStreamSynchronize(stream));
  }

  void TearDown() override {
    CUDA_CHECK(cudaFree(in));
    CUDA_CHECK(cudaFree(out_ref));
    CUDA_CHECK(cudaFree(out));
    CUDA_CHECK(cudaFree(keys));
    CUDA_CHECK(cudaStreamDestroy(stream));
  }

 protected:
  cudaStream_t stream;
  ReduceColsInputs<T> params;
  T *in, *out_ref, *out;
  uint32_t *keys;
};

const std::vector<ReduceColsInputs<float>> inputsf = {
  {0.0001f, 128, 32, 6, 1234ULL}, {0.0005f, 121, 63, 10, 1234ULL}};
typedef ReduceColsTest<float> ReduceColsTestF;
TEST_P(ReduceColsTestF, Result) {
  ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.rows * params.nkeys,
                                raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceColsTests, ReduceColsTestF,
                        ::testing::ValuesIn(inputsf));

const std::vector<ReduceColsInputs<double>> inputsd2 = {
  {0.0000001, 128, 32, 6, 1234ULL}, {0.0000001, 121, 63, 10, 1234ULL}};
typedef ReduceColsTest<double> ReduceColsTestD;
TEST_P(ReduceColsTestD, Result) {
  ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.rows * params.nkeys,
                                raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(ReduceColsTests, ReduceColsTestD,
                        ::testing::ValuesIn(inputsd2));

}  // end namespace LinAlg
}  // end namespace MLCommon
20b4ab6f86033a39fb0a1e46e6d15d09ce9c107d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void matrixMultiplyUpdateWeights_tanh(float * A, float * B, float * C,
                                                 int numARows, int numAColumns,
                                                 int numBRows, int numBColumns,
                                                 int numCRows, int numCColumns,
                                                 float learning_rate) {
  //@@ Insert code to implement matrix multiplication here
  __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
  __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
  int bx = blockIdx.x, by = blockIdx.y,
      tx = threadIdx.x, ty = threadIdx.y,
      Row = by * TILE_WIDTH + ty,
      Col = bx * TILE_WIDTH + tx;
  float Pvalue = 0;
  for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) {
    if (Row < numARows && m*TILE_WIDTH+tx < numAColumns)
      ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx];
    else
      ds_M[ty][tx] = 0;
    if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows)
      ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col];
    else
      ds_N[ty][tx] = 0;
    __syncthreads();
    for (int k = 0; k < TILE_WIDTH; ++k)
      Pvalue += ds_M[ty][k] * ds_N[k][tx];
    __syncthreads();
  }
  if (Row < numCRows && Col < numCColumns)
    C[Row*numCColumns+Col] = C[Row*numCColumns+Col] - learning_rate * (Pvalue / numAColumns);
}
20b4ab6f86033a39fb0a1e46e6d15d09ce9c107d.cu
#include "includes.h"

__global__ void matrixMultiplyUpdateWeights_tanh(float * A, float * B, float * C,
                                                 int numARows, int numAColumns,
                                                 int numBRows, int numBColumns,
                                                 int numCRows, int numCColumns,
                                                 float learning_rate) {
  //@@ Insert code to implement matrix multiplication here
  __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
  __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
  int bx = blockIdx.x, by = blockIdx.y,
      tx = threadIdx.x, ty = threadIdx.y,
      Row = by * TILE_WIDTH + ty,
      Col = bx * TILE_WIDTH + tx;
  float Pvalue = 0;
  for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) {
    if (Row < numARows && m*TILE_WIDTH+tx < numAColumns)
      ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx];
    else
      ds_M[ty][tx] = 0;
    if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows)
      ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col];
    else
      ds_N[ty][tx] = 0;
    __syncthreads();
    for (int k = 0; k < TILE_WIDTH; ++k)
      Pvalue += ds_M[ty][k] * ds_N[k][tx];
    __syncthreads();
  }
  if (Row < numCRows && Col < numCColumns)
    C[Row*numCColumns+Col] = C[Row*numCColumns+Col] - learning_rate * (Pvalue / numAColumns);
}
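The weight-update kernel in the pair above relies on a TILE_WIDTH macro coming from "includes.h", which is not part of this record. The host-side sketch below is not from the dataset; it shows one plausible way to launch the kernel, and the TILE_WIDTH value, grid math, and synchronization choice are all assumptions.

// Hypothetical launch helper for the kernel above (not from the dataset).
// Assumes TILE_WIDTH is 16 and that d_A, d_B, d_C are device pointers that
// were allocated and filled elsewhere.
#include <cuda_runtime.h>

#define TILE_WIDTH 16

void launchUpdateWeightsTanh(float *d_A, float *d_B, float *d_C,
                             int numARows, int numAColumns,
                             int numBRows, int numBColumns,
                             float learning_rate) {
  int numCRows = numARows;        // C inherits A's row count
  int numCColumns = numBColumns;  // and B's column count
  dim3 block(TILE_WIDTH, TILE_WIDTH);
  dim3 grid((numCColumns + TILE_WIDTH - 1) / TILE_WIDTH,
            (numCRows + TILE_WIDTH - 1) / TILE_WIDTH);
  matrixMultiplyUpdateWeights_tanh<<<grid, block>>>(
      d_A, d_B, d_C, numARows, numAColumns, numBRows, numBColumns,
      numCRows, numCColumns, learning_rate);
  cudaDeviceSynchronize();  // block until the weight update has finished
}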
a6331fae61660fbf9831b276c4cd6f3f0c39c409.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include "../DataBuffer.h" #include <DataTypeUtils.h> #include <op_boilerplate.h> #include <exceptions/cuda_exception.h> namespace nd4j { //////////////////////////////////////////////////////////////////////// void DataBuffer::allocateSpecial() { if (_specialBuffer == nullptr && getLenInBytes() > 0) { ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t); _isOwnerSpecial = true; } } //////////////////////////////////////////////////////////////////////// void DataBuffer::syncToPrimary(const LaunchContext* context, const bool forceSync) { if(isPrimaryActual() && !forceSync) return; allocatePrimary(); auto res = hipStreamSynchronize(*context->getCudaStream()); if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary failed to to some previous kernel failre", res); hipMemcpy(_primaryBuffer, _specialBuffer, getLenInBytes(), hipMemcpyDeviceToHost); readPrimary(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::syncToSpecial(const bool forceSync) { if(isSpecialActual() && !forceSync) return; allocateSpecial(); hipMemcpy(_specialBuffer, _primaryBuffer, getLenInBytes(), hipMemcpyHostToDevice); readSpecial(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::deleteSpecial() { if(_isOwnerSpecial && _specialBuffer != nullptr && getLenInBytes() != 0) { auto p = reinterpret_cast<int8_t*>(_specialBuffer); RELEASE_SPECIAL(p, _workspace); _specialBuffer = nullptr; _isOwnerSpecial = false; } } //////////////////////////////////////////////////////////////////////// void DataBuffer::setCountersToZero() { _counter.store(0L); _writePrimary.store(0L); _writeSpecial.store(0L); _readPrimary.store(0L); _readSpecial.store(0L); } //////////////////////////////////////////////////////////////////////// void DataBuffer::copyCounters(const DataBuffer& other) { _counter.store(other._counter); _writePrimary.store(other._readSpecial); _writeSpecial.store(other._readPrimary); _readPrimary.store(other._writeSpecial); _readSpecial.store(other._writePrimary); } //////////////////////////////////////////////////////////////////////// void DataBuffer::memcpy(const DataBuffer &dst, const DataBuffer &src) { if (src._lenInBytes < dst._lenInBytes) throw std::runtime_error("DataBuffer::memcpy: Source data buffer is smaller than destination"); if (src.isSpecialActual()) { hipMemcpy(dst._specialBuffer, src._specialBuffer, dst.getLenInBytes(), hipMemcpyDeviceToDevice); } else if (src.isPrimaryActual()) { hipMemcpy(dst._specialBuffer, src._primaryBuffer, dst.getLenInBytes(), hipMemcpyHostToDevice); } dst.writeSpecial(); } 
//////////////////////////////////////////////////////////////////////// void DataBuffer::copyBufferFrom(const DataBuffer& other, size_t sizeToCopyinBytes, const Nd4jLong offsetThis, const Nd4jLong offsetOther) { // copies only to special buffer if(other._primaryBuffer == nullptr && other._specialBuffer == nullptr) return; if(sizeToCopyinBytes == 0) sizeToCopyinBytes = other.getLenInBytes(); if(sizeToCopyinBytes == 0) return; if(other.isPrimaryActual()) { auto res = hipMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(other._primaryBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType), sizeToCopyinBytes, hipMemcpyHostToDevice); if (res != 0) throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyHostToDevice failed!", res); other.readPrimary(); } else { auto res = hipMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(other._specialBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType), sizeToCopyinBytes, hipMemcpyDeviceToDevice); if (res != 0) throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyDeviceToDevice failed!", res); other.readSpecial(); } writeSpecial(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::copyBufferFromHost(const void* hostBuffer, size_t sizeToCopyinBytes, const Nd4jLong offsetThis, const Nd4jLong offsetHostBuffer) { // copies only to special buffer if(hostBuffer == nullptr) return; if(sizeToCopyinBytes == 0) sizeToCopyinBytes = getLenInBytes(); if(sizeToCopyinBytes == 0) return; auto res = hipMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(hostBuffer) + offsetHostBuffer * DataTypeUtils::sizeOfElement(_dataType), sizeToCopyinBytes, hipMemcpyHostToDevice); if (res != 0) throw cuda_exception::build("DataBuffer::copyBufferFromHost: cudaMemcpy_cudaMemcpyHostToDevice failed!", res); writeSpecial(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::setSpecial(void* special, const bool isOwnerSpecial) { deleteSpecial(); _specialBuffer = special; _isOwnerSpecial = isOwnerSpecial; } //////////////////////////////////////////////////////////////////////// void DataBuffer::allocateBuffers(const bool allocBoth) { // always allocate special buffer only (cuda case) allocateSpecial(); if(allocBoth) allocatePrimary(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::setToZeroBuffers(const bool both) { hipMemset(special(), 0, getLenInBytes()); writeSpecial(); if(both) { memset(primary(), 0, getLenInBytes()); readPrimary(); } } //////////////////////////////////////////////////////////////////////// void DataBuffer::migrate() { memory::Workspace* newWorkspace = nullptr; void* newBuffer; ALLOCATE_SPECIAL(newBuffer, newWorkspace, getLenInBytes(), int8_t); hipMemcpy(newBuffer, _specialBuffer, getLenInBytes(), hipMemcpyDeviceToDevice); if (_isOwnerSpecial) { // now we're releasing original buffer RELEASE_SPECIAL(_specialBuffer, _workspace); } _isOwnerSpecial = true; _specialBuffer = newBuffer; } //////////////////////////////////////////////////////////////////////// void DataBuffer::writePrimary() const { _writePrimary = ++_counter; } void DataBuffer::writeSpecial() const { _writeSpecial = ++_counter; } void DataBuffer::readPrimary() const { _readPrimary = 
++_counter; } void DataBuffer::readSpecial() const { _readSpecial = ++_counter; } bool DataBuffer::isPrimaryActual() const { return (_writePrimary.load() > _writeSpecial.load() || _readPrimary.load() > _writeSpecial.load()); } bool DataBuffer::isSpecialActual() const { return (_writeSpecial.load() > _writePrimary.load() || _readSpecial.load() > _writePrimary.load()); } }
a6331fae61660fbf9831b276c4cd6f3f0c39c409.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include "../DataBuffer.h" #include <DataTypeUtils.h> #include <op_boilerplate.h> #include <exceptions/cuda_exception.h> namespace nd4j { //////////////////////////////////////////////////////////////////////// void DataBuffer::allocateSpecial() { if (_specialBuffer == nullptr && getLenInBytes() > 0) { ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t); _isOwnerSpecial = true; } } //////////////////////////////////////////////////////////////////////// void DataBuffer::syncToPrimary(const LaunchContext* context, const bool forceSync) { if(isPrimaryActual() && !forceSync) return; allocatePrimary(); auto res = cudaStreamSynchronize(*context->getCudaStream()); if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary failed to to some previous kernel failre", res); cudaMemcpy(_primaryBuffer, _specialBuffer, getLenInBytes(), cudaMemcpyDeviceToHost); readPrimary(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::syncToSpecial(const bool forceSync) { if(isSpecialActual() && !forceSync) return; allocateSpecial(); cudaMemcpy(_specialBuffer, _primaryBuffer, getLenInBytes(), cudaMemcpyHostToDevice); readSpecial(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::deleteSpecial() { if(_isOwnerSpecial && _specialBuffer != nullptr && getLenInBytes() != 0) { auto p = reinterpret_cast<int8_t*>(_specialBuffer); RELEASE_SPECIAL(p, _workspace); _specialBuffer = nullptr; _isOwnerSpecial = false; } } //////////////////////////////////////////////////////////////////////// void DataBuffer::setCountersToZero() { _counter.store(0L); _writePrimary.store(0L); _writeSpecial.store(0L); _readPrimary.store(0L); _readSpecial.store(0L); } //////////////////////////////////////////////////////////////////////// void DataBuffer::copyCounters(const DataBuffer& other) { _counter.store(other._counter); _writePrimary.store(other._readSpecial); _writeSpecial.store(other._readPrimary); _readPrimary.store(other._writeSpecial); _readSpecial.store(other._writePrimary); } //////////////////////////////////////////////////////////////////////// void DataBuffer::memcpy(const DataBuffer &dst, const DataBuffer &src) { if (src._lenInBytes < dst._lenInBytes) throw std::runtime_error("DataBuffer::memcpy: Source data buffer is smaller than destination"); if (src.isSpecialActual()) { cudaMemcpy(dst._specialBuffer, src._specialBuffer, dst.getLenInBytes(), cudaMemcpyDeviceToDevice); } else if (src.isPrimaryActual()) { cudaMemcpy(dst._specialBuffer, src._primaryBuffer, dst.getLenInBytes(), cudaMemcpyHostToDevice); } dst.writeSpecial(); } //////////////////////////////////////////////////////////////////////// void 
DataBuffer::copyBufferFrom(const DataBuffer& other, size_t sizeToCopyinBytes, const Nd4jLong offsetThis, const Nd4jLong offsetOther) { // copies only to special buffer if(other._primaryBuffer == nullptr && other._specialBuffer == nullptr) return; if(sizeToCopyinBytes == 0) sizeToCopyinBytes = other.getLenInBytes(); if(sizeToCopyinBytes == 0) return; if(other.isPrimaryActual()) { auto res = cudaMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(other._primaryBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType), sizeToCopyinBytes, cudaMemcpyHostToDevice); if (res != 0) throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyHostToDevice failed!", res); other.readPrimary(); } else { auto res = cudaMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(other._specialBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType), sizeToCopyinBytes, cudaMemcpyDeviceToDevice); if (res != 0) throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyDeviceToDevice failed!", res); other.readSpecial(); } writeSpecial(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::copyBufferFromHost(const void* hostBuffer, size_t sizeToCopyinBytes, const Nd4jLong offsetThis, const Nd4jLong offsetHostBuffer) { // copies only to special buffer if(hostBuffer == nullptr) return; if(sizeToCopyinBytes == 0) sizeToCopyinBytes = getLenInBytes(); if(sizeToCopyinBytes == 0) return; auto res = cudaMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType), static_cast<const int8_t*>(hostBuffer) + offsetHostBuffer * DataTypeUtils::sizeOfElement(_dataType), sizeToCopyinBytes, cudaMemcpyHostToDevice); if (res != 0) throw cuda_exception::build("DataBuffer::copyBufferFromHost: cudaMemcpy_cudaMemcpyHostToDevice failed!", res); writeSpecial(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::setSpecial(void* special, const bool isOwnerSpecial) { deleteSpecial(); _specialBuffer = special; _isOwnerSpecial = isOwnerSpecial; } //////////////////////////////////////////////////////////////////////// void DataBuffer::allocateBuffers(const bool allocBoth) { // always allocate special buffer only (cuda case) allocateSpecial(); if(allocBoth) allocatePrimary(); } //////////////////////////////////////////////////////////////////////// void DataBuffer::setToZeroBuffers(const bool both) { cudaMemset(special(), 0, getLenInBytes()); writeSpecial(); if(both) { memset(primary(), 0, getLenInBytes()); readPrimary(); } } //////////////////////////////////////////////////////////////////////// void DataBuffer::migrate() { memory::Workspace* newWorkspace = nullptr; void* newBuffer; ALLOCATE_SPECIAL(newBuffer, newWorkspace, getLenInBytes(), int8_t); cudaMemcpy(newBuffer, _specialBuffer, getLenInBytes(), cudaMemcpyDeviceToDevice); if (_isOwnerSpecial) { // now we're releasing original buffer RELEASE_SPECIAL(_specialBuffer, _workspace); } _isOwnerSpecial = true; _specialBuffer = newBuffer; } //////////////////////////////////////////////////////////////////////// void DataBuffer::writePrimary() const { _writePrimary = ++_counter; } void DataBuffer::writeSpecial() const { _writeSpecial = ++_counter; } void DataBuffer::readPrimary() const { _readPrimary = ++_counter; } void DataBuffer::readSpecial() const { _readSpecial = 
++_counter; } bool DataBuffer::isPrimaryActual() const { return (_writePrimary.load() > _writeSpecial.load() || _readPrimary.load() > _writeSpecial.load()); } bool DataBuffer::isSpecialActual() const { return (_writeSpecial.load() > _writePrimary.load() || _readSpecial.load() > _writePrimary.load()); } }
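The DataBuffer pair above decides which copy of the data is current with monotonically increasing counters rather than dirty flags. The standalone model below is not code from the file; it only restates the isPrimaryActual/isSpecialActual logic in a minimal class so the counter scheme is easier to follow.

// Simplified stand-in (not from the file): every write or read bumps a shared
// counter, and a side is "actual" when its last write or read is newer than
// the other side's last write.
#include <atomic>
#include <cassert>

struct SyncState {
  std::atomic<long> counter{0}, writePrimary{0}, writeSpecial{0},
                    readPrimary{0}, readSpecial{0};
  void onWritePrimary() { writePrimary = ++counter; }
  void onWriteSpecial() { writeSpecial = ++counter; }
  void onReadPrimary()  { readPrimary  = ++counter; }
  void onReadSpecial()  { readSpecial  = ++counter; }
  bool primaryActual() const {
    return writePrimary.load() > writeSpecial.load() ||
           readPrimary.load()  > writeSpecial.load();
  }
  bool specialActual() const {
    return writeSpecial.load() > writePrimary.load() ||
           readSpecial.load()  > writePrimary.load();
  }
};

int main() {
  SyncState s;
  s.onWriteSpecial();                          // device buffer modified
  assert(s.specialActual() && !s.primaryActual());
  s.onReadPrimary();                           // host copy refreshed from device
  assert(s.primaryActual() && s.specialActual());  // both sides now usable
  return 0;
}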
e3ed316ea1cbe58d2a7e8d4f0ca3e2ef8174d6e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 29-Oct-2012 09:37:53 // // user function __device__ #include "bres_calc.h" // CUDA kernel function __global__ void op_cuda_bres_calc( double *ind_arg0, double *ind_arg1, double *ind_arg2, double *ind_arg3, int *ind_map, short *arg_map, int *arg5, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { double arg4_l[4]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ int *ind_arg2_map, ind_arg2_size; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ double *ind_arg0_s; __shared__ double *ind_arg1_s; __shared__ double *ind_arg2_s; __shared__ double *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*4]; ind_arg1_size = ind_arg_sizes[1+blockId*4]; ind_arg2_size = ind_arg_sizes[2+blockId*4]; ind_arg3_size = ind_arg_sizes[3+blockId*4]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4]; ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4]; ind_arg2_map = &ind_map[3*set_size] + ind_arg_offs[2+blockId*4]; ind_arg3_map = &ind_map[4*set_size] + ind_arg_offs[3+blockId*4]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2); ind_arg1_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4); ind_arg2_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1); ind_arg3_s = (double *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x) ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4]; for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x) ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3_s[n] = ZERO_double; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<4; d++) arg4_l[d] = ZERO_double; // user-supplied kernel call bres_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg1_s+arg_map[2*set_size+n+offset_b]*4, ind_arg2_s+arg_map[3*set_size+n+offset_b]*1, arg4_l, arg5+(n+offset_b)*1 ); col2 = colors[n+offset_b]; } // store local variables int arg4_map; if (col2>=0) { arg4_map = arg_map[4*set_size+n+offset_b]; } for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<4; d++) ind_arg3_s[d+arg4_map*4] += arg4_l[d]; } __syncthreads(); } } // apply pointered write/increment for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } // host stub function void op_par_loop_bres_calc(char const *name, op_set set, 
op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5 ){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; int ninds = 4; int inds[6] = {0,0,1,2,3,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: bres_calc\n"); } // get plan #ifdef OP_PART_SIZE_3 int part_size = OP_PART_SIZE_3; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(3); OP_kernels[3].name = name; OP_kernels[3].count += 1; if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); op_timers_core(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all_cuda(nargs,args); #ifdef OP_BLOCK_SIZE_3 int nthread = OP_BLOCK_SIZE_3; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks),dim3(nthread),nshared, 0, (double *)arg0.data_d, (double *)arg2.data_d, (double *)arg3.data_d, (double *)arg4.data_d, Plan->ind_map, Plan->loc_map, (int *)arg5.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_bres_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(3); OP_kernels[3].transfer += Plan->transfer; OP_kernels[3].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[3].time += wall_t2 - wall_t1; }
e3ed316ea1cbe58d2a7e8d4f0ca3e2ef8174d6e2.cu
// // auto-generated by op2.m on 29-Oct-2012 09:37:53 // // user function __device__ #include "bres_calc.h" // CUDA kernel function __global__ void op_cuda_bres_calc( double *ind_arg0, double *ind_arg1, double *ind_arg2, double *ind_arg3, int *ind_map, short *arg_map, int *arg5, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { double arg4_l[4]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ int *ind_arg2_map, ind_arg2_size; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ double *ind_arg0_s; __shared__ double *ind_arg1_s; __shared__ double *ind_arg2_s; __shared__ double *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*4]; ind_arg1_size = ind_arg_sizes[1+blockId*4]; ind_arg2_size = ind_arg_sizes[2+blockId*4]; ind_arg3_size = ind_arg_sizes[3+blockId*4]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4]; ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4]; ind_arg2_map = &ind_map[3*set_size] + ind_arg_offs[2+blockId*4]; ind_arg3_map = &ind_map[4*set_size] + ind_arg_offs[3+blockId*4]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2); ind_arg1_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4); ind_arg2_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1); ind_arg3_s = (double *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x) ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4]; for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x) ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3_s[n] = ZERO_double; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<4; d++) arg4_l[d] = ZERO_double; // user-supplied kernel call bres_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg1_s+arg_map[2*set_size+n+offset_b]*4, ind_arg2_s+arg_map[3*set_size+n+offset_b]*1, arg4_l, arg5+(n+offset_b)*1 ); col2 = colors[n+offset_b]; } // store local variables int arg4_map; if (col2>=0) { arg4_map = arg_map[4*set_size+n+offset_b]; } for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<4; d++) ind_arg3_s[d+arg4_map*4] += arg4_l[d]; } __syncthreads(); } } // apply pointered write/increment for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } // host stub function void op_par_loop_bres_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5 ){ int nargs 
= 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; int ninds = 4; int inds[6] = {0,0,1,2,3,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: bres_calc\n"); } // get plan #ifdef OP_PART_SIZE_3 int part_size = OP_PART_SIZE_3; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(3); OP_kernels[3].name = name; OP_kernels[3].count += 1; if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); op_timers_core(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all_cuda(nargs,args); #ifdef OP_BLOCK_SIZE_3 int nthread = OP_BLOCK_SIZE_3; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; op_cuda_bres_calc<<<nblocks,nthread,nshared>>>( (double *)arg0.data_d, (double *)arg2.data_d, (double *)arg3.data_d, (double *)arg4.data_d, Plan->ind_map, Plan->loc_map, (int *)arg5.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(cudaDeviceSynchronize()); cutilCheckMsg("op_cuda_bres_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(3); OP_kernels[3].transfer += Plan->transfer; OP_kernels[3].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[3].time += wall_t2 - wall_t1; }
1657b4a4ae66970a837380c87682e6fd09bb7bdd.hip
// !!! This is a file automatically generated by hipify!!! // // auto-generated by ops.py// //header #define OPS_ACC_MD_MACROS #define OPS_2D #include "ops_lib_cpp.h" #include "ops_cuda_rt_support.h" #include "ops_cuda_reduction.h" #include "user_types.h" #ifdef OPS_MPI #include "ops_mpi_core.h" #endif // global constants __constant__ field_type field; __constant__ grid_type grid; __constant__ int number_of_states; __constant__ state_type *states; __constant__ int g_circ; __constant__ int g_point; __constant__ int g_rect; void ops_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){ if (!strcmp(name,"field")) { cutilSafeCall(hipMemcpyToSymbol(field, dat, dim*size)); } else if (!strcmp(name,"grid")) { cutilSafeCall(hipMemcpyToSymbol(grid, dat, dim*size)); } else if (!strcmp(name,"number_of_states")) { cutilSafeCall(hipMemcpyToSymbol(number_of_states, dat, dim*size)); } else if (!strcmp(name,"states")) { char *temp; cutilSafeCall(hipMalloc((void**)&temp,dim*size)); cutilSafeCall(hipMemcpy(temp,dat,dim*size,hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpyToSymbol(states, &temp, sizeof(char *))); } else if (!strcmp(name,"g_circ")) { cutilSafeCall(hipMemcpyToSymbol(g_circ, dat, dim*size)); } else if (!strcmp(name,"g_point")) { cutilSafeCall(hipMemcpyToSymbol(g_point, dat, dim*size)); } else if (!strcmp(name,"g_rect")) { cutilSafeCall(hipMemcpyToSymbol(g_rect, dat, dim*size)); } else { printf("error: unknown const name\n"); exit(1); } } //user kernel files #include "field_summary_kernel_cuda_kernel.cu" #include "generate_chunk_kernel_cuda_kernel.cu" #include "initialise_chunk_kernel_zero_hip_kernel.hip" #include "initialise_chunk_kernel_zero_x_hip_kernel.hip" #include "initialise_chunk_kernel_zero_y_cuda_kernel.cu" #include "initialise_chunk_kernel_xx_cuda_kernel.cu" #include "initialise_chunk_kernel_yy_cuda_kernel.cu" #include "initialise_chunk_kernel_x_cuda_kernel.cu" #include "initialise_chunk_kernel_y_cuda_kernel.cu" #include "initialise_chunk_kernel_cellx_cuda_kernel.cu" #include "initialise_chunk_kernel_celly_cuda_kernel.cu" #include "initialise_chunk_kernel_volume_cuda_kernel.cu" #include "set_field_kernel_cuda_kernel.cu" #include "tea_leaf_init_zero2_kernel_cuda_kernel.cu" #include "tea_leaf_yeqx_kernel_cuda_kernel.cu" #include "tea_leaf_dot_kernel_cuda_kernel.cu" #include "tea_leaf_cg_calc_w_reduce_kernel_cuda_kernel.cu" #include "tea_leaf_axpy_kernel_cuda_kernel.cu" #include "tea_leaf_cg_calc_ur_r_reduce_kernel_cuda_kernel.cu" #include "tea_leaf_axpby_kernel_cuda_kernel.cu" #include "tea_leaf_cheby_init_kernel_cuda_kernel.cu" #include "tea_leaf_recip3_kernel_cuda_kernel.cu" #include "tea_leaf_xpy_kernel_cuda_kernel.cu" #include "tea_leaf_common_init_u_u0_kernel_cuda_kernel.cu" #include "tea_leaf_recip_kernel_cuda_kernel.cu" #include "tea_leaf_common_init_Kx_Ky_kernel_cuda_kernel.cu" #include "tea_leaf_init_zero_kernel_cuda_kernel.cu" #include "tea_leaf_common_init_kernel_cuda_kernel.cu" #include "tea_leaf_recip2_kernel_cuda_kernel.cu" #include "tea_leaf_common_residual_kernel_cuda_kernel.cu" #include "tea_leaf_norm2_kernel_cuda_kernel.cu" #include "tea_leaf_common_init_diag_init_kernel_cuda_kernel.cu" #include "tea_leaf_zeqxty_kernel_cuda_kernel.cu" #include "tea_leaf_jacobi_kernel_cuda_kernel.cu" #include "tea_leaf_ppcg_init1_kernel_cuda_kernel.cu" #include "tea_leaf_ppcg_init2_kernel_cuda_kernel.cu" #include "tea_leaf_ppcg_inner1_kernel_cuda_kernel.cu" #include "tea_leaf_ppcg_inner2_kernel_cuda_kernel.cu" #include 
"tea_leaf_ppcg_reduce_kernel_cuda_kernel.cu" #include "update_halo_kernel1_b2_cuda_kernel.cu" #include "update_halo_kernel1_b1_cuda_kernel.cu" #include "update_halo_kernel1_t2_cuda_kernel.cu" #include "update_halo_kernel1_t1_cuda_kernel.cu" #include "update_halo_kernel1_l2_cuda_kernel.cu" #include "update_halo_kernel1_l1_hip_kernel.hip" #include "update_halo_kernel1_r2_cuda_kernel.cu" #include "update_halo_kernel1_r1_cuda_kernel.cu"
1657b4a4ae66970a837380c87682e6fd09bb7bdd.cu
// // auto-generated by ops.py// //header #define OPS_ACC_MD_MACROS #define OPS_2D #include "ops_lib_cpp.h" #include "ops_cuda_rt_support.h" #include "ops_cuda_reduction.h" #include "user_types.h" #ifdef OPS_MPI #include "ops_mpi_core.h" #endif // global constants __constant__ field_type field; __constant__ grid_type grid; __constant__ int number_of_states; __constant__ state_type *states; __constant__ int g_circ; __constant__ int g_point; __constant__ int g_rect; void ops_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){ if (!strcmp(name,"field")) { cutilSafeCall(cudaMemcpyToSymbol(field, dat, dim*size)); } else if (!strcmp(name,"grid")) { cutilSafeCall(cudaMemcpyToSymbol(grid, dat, dim*size)); } else if (!strcmp(name,"number_of_states")) { cutilSafeCall(cudaMemcpyToSymbol(number_of_states, dat, dim*size)); } else if (!strcmp(name,"states")) { char *temp; cutilSafeCall(cudaMalloc((void**)&temp,dim*size)); cutilSafeCall(cudaMemcpy(temp,dat,dim*size,cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpyToSymbol(states, &temp, sizeof(char *))); } else if (!strcmp(name,"g_circ")) { cutilSafeCall(cudaMemcpyToSymbol(g_circ, dat, dim*size)); } else if (!strcmp(name,"g_point")) { cutilSafeCall(cudaMemcpyToSymbol(g_point, dat, dim*size)); } else if (!strcmp(name,"g_rect")) { cutilSafeCall(cudaMemcpyToSymbol(g_rect, dat, dim*size)); } else { printf("error: unknown const name\n"); exit(1); } } //user kernel files #include "field_summary_kernel_cuda_kernel.cu" #include "generate_chunk_kernel_cuda_kernel.cu" #include "initialise_chunk_kernel_zero_cuda_kernel.cu" #include "initialise_chunk_kernel_zero_x_cuda_kernel.cu" #include "initialise_chunk_kernel_zero_y_cuda_kernel.cu" #include "initialise_chunk_kernel_xx_cuda_kernel.cu" #include "initialise_chunk_kernel_yy_cuda_kernel.cu" #include "initialise_chunk_kernel_x_cuda_kernel.cu" #include "initialise_chunk_kernel_y_cuda_kernel.cu" #include "initialise_chunk_kernel_cellx_cuda_kernel.cu" #include "initialise_chunk_kernel_celly_cuda_kernel.cu" #include "initialise_chunk_kernel_volume_cuda_kernel.cu" #include "set_field_kernel_cuda_kernel.cu" #include "tea_leaf_init_zero2_kernel_cuda_kernel.cu" #include "tea_leaf_yeqx_kernel_cuda_kernel.cu" #include "tea_leaf_dot_kernel_cuda_kernel.cu" #include "tea_leaf_cg_calc_w_reduce_kernel_cuda_kernel.cu" #include "tea_leaf_axpy_kernel_cuda_kernel.cu" #include "tea_leaf_cg_calc_ur_r_reduce_kernel_cuda_kernel.cu" #include "tea_leaf_axpby_kernel_cuda_kernel.cu" #include "tea_leaf_cheby_init_kernel_cuda_kernel.cu" #include "tea_leaf_recip3_kernel_cuda_kernel.cu" #include "tea_leaf_xpy_kernel_cuda_kernel.cu" #include "tea_leaf_common_init_u_u0_kernel_cuda_kernel.cu" #include "tea_leaf_recip_kernel_cuda_kernel.cu" #include "tea_leaf_common_init_Kx_Ky_kernel_cuda_kernel.cu" #include "tea_leaf_init_zero_kernel_cuda_kernel.cu" #include "tea_leaf_common_init_kernel_cuda_kernel.cu" #include "tea_leaf_recip2_kernel_cuda_kernel.cu" #include "tea_leaf_common_residual_kernel_cuda_kernel.cu" #include "tea_leaf_norm2_kernel_cuda_kernel.cu" #include "tea_leaf_common_init_diag_init_kernel_cuda_kernel.cu" #include "tea_leaf_zeqxty_kernel_cuda_kernel.cu" #include "tea_leaf_jacobi_kernel_cuda_kernel.cu" #include "tea_leaf_ppcg_init1_kernel_cuda_kernel.cu" #include "tea_leaf_ppcg_init2_kernel_cuda_kernel.cu" #include "tea_leaf_ppcg_inner1_kernel_cuda_kernel.cu" #include "tea_leaf_ppcg_inner2_kernel_cuda_kernel.cu" #include "tea_leaf_ppcg_reduce_kernel_cuda_kernel.cu" #include 
"update_halo_kernel1_b2_cuda_kernel.cu" #include "update_halo_kernel1_b1_cuda_kernel.cu" #include "update_halo_kernel1_t2_cuda_kernel.cu" #include "update_halo_kernel1_t1_cuda_kernel.cu" #include "update_halo_kernel1_l2_cuda_kernel.cu" #include "update_halo_kernel1_l1_cuda_kernel.cu" #include "update_halo_kernel1_r2_cuda_kernel.cu" #include "update_halo_kernel1_r1_cuda_kernel.cu"
ff0f4ff8b6110ceb93314bfbd58aed93a812d5da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstddef> #include <cstdint> #include <stdexcept> #include <utility> #include <algorithm> #include <cmath> #include <complex> #include <cstdlib> #include <iomanip> #include <iostream> #include <random> #include <vector> #include <sys/time.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "FftComplex.hpp" using std::complex; using std::cout; using std::endl; using std::vector; using std::size_t; using std::uintmax_t; __global__ void transformCUDA(int *bloques,size_t *n,size_t *size,size_t *halfsize,size_t *tablestep, complex<double> *vec,complex<double> *expTable,int * IndexToBeProcessed){ int index = threadIdx.x + blockIdx.x * blockDim.x; int hilos = blockDim.x * (*bloques); int from = (int)(((*n/2)/hilos)*index); int to = (int)(((*n/2)/hilos)*(index+1))-1; //if (index == hilos-1){ //to = (int)(*n/2 - 1); //} IndexToBeProcessed[0]= 0; for (int x = 1;x < *n/2;x++){ if((IndexToBeProcessed[x-1]+1)% *halfsize==0){ IndexToBeProcessed[x] = IndexToBeProcessed[x-1] + (int)*halfsize + 1; }else{ IndexToBeProcessed[x] = IndexToBeProcessed[x-1] + 1; } //printf("index %d to be processed: %d\n",x,IndexToBeProcessed[x]); if (x == *n/2) break; } for(int j = from;j<=to;j++){ size_t k = *tablestep*(IndexToBeProcessed[j]%*size); size_t l = IndexToBeProcessed[j] + *halfsize; double temp_real = real(vec[l]) * real(expTable[k]) - imag(vec[l]) * imag(expTable[k]); double temp_img = real(vec[l]) * imag(expTable[k]) + imag(vec[l]) * real(expTable[k]); double vec_real = real(vec[IndexToBeProcessed[j]]); double vec_img = imag(vec[IndexToBeProcessed[j]]); complex<double>temp(vec_real-temp_real,vec_img-temp_img); vec[l] = temp; complex<double>temp2(vec_real+temp_real,vec_img+temp_img); vec[IndexToBeProcessed[j]] = temp2; } /*for (size_t i = 0; i < *n; i += *size) { for (size_t j = i, k = 0; j < i + *halfsize; j++, k += *tablestep) { //complex<double> temp = vec[j + *halfsize] * expTable[k]; //vec[j + *halfsize] = vec[j] - temp; //vec[j] += temp; double temp_real = real(vec[j + *halfsize]) * real(expTable[k]) - imag(vec[j + *halfsize]) * imag(expTable[k]); double temp_img = real(vec[j + *halfsize]) * imag(expTable[k]) + imag(vec[j + *halfsize]) * real(expTable[k]); double vec_real = real(vec[j]); double vec_img = imag(vec[j]); complex<double>temp(vec_real-temp_real,vec_img-temp_img); vec[j + *halfsize] = temp; complex<double>temp2(vec_real+temp_real,vec_img+temp_img); vec[j] = temp2; } }*/ } // Private function prototypes static size_t reverseBits(size_t val, int width); static void testFft(int n,int hilos,int bloques); static thrust::host_vector<complex<double> > randomComplexes(int n); void Fft::transform(thrust::host_vector<complex<double> > &vec, bool inverse,int hilos,int bloques) { size_t n = vec.size(); if (n == 0) return; else if ((n & (n - 1)) == 0) // Is power of 2 Fft::transformRadix2(vec, inverse , hilos,bloques); else // More complicated algorithm for arbitrary sizes printf("is not power of 2\n"); } void Fft::transformRadix2(thrust::host_vector<complex<double> > &vec, bool inverse,int hilos,int bloques ) { // Length variables size_t n = vec.size(); int levels = 0; // Compute levels = floor(log2(n)) for (size_t temp = n; temp > 1U; temp >>= 1) levels++; if (static_cast<size_t>(1U) << levels != n) throw std::domain_error("Length is not a power of 2"); // Trigonometric table thrust::host_vector<complex<double> > expTable(n / 2); for (size_t i = 0; i < n / 2; i++) expTable[i] = 
std::polar(1.0, (inverse ? 2 : -2) * M_PI * i / n); // Bit-reversed addressing permutation for (size_t i = 0; i < n; i++) { size_t j = reverseBits(i, levels); if (j > i) std::swap(vec[i], vec[j]); } //CUDA int *d_bloques; size_t *d_n; hipMalloc((void **)&d_bloques, sizeof(int)); hipMemcpy(d_bloques, &bloques, sizeof(int), hipMemcpyHostToDevice); hipMalloc((void **)&d_n, sizeof(size_t)); hipMemcpy(d_n, &n, sizeof(size_t), hipMemcpyHostToDevice); thrust::device_vector<complex<double> > d_expTable = expTable; thrust::device_vector<complex<double> > d_vec = vec; complex<double> * d_expTable_pointer = thrust::raw_pointer_cast(&d_expTable[0]); complex<double> * d_vec_pointer = thrust::raw_pointer_cast(&d_vec[0]); struct timeval* tval_before,* tval_after,* tval_result; tval_before = (struct timeval*)malloc(sizeof(struct timeval)); tval_after = (struct timeval*)malloc(sizeof(struct timeval)); tval_result = (struct timeval*)malloc(sizeof(struct timeval)); int IndexToBeProcessed [n/2]; // Cooley-Tukey decimation-in-time radix-2 FFT gettimeofday(tval_before, NULL); for (size_t size = 2; size <= n; size *= 2) { size_t halfsize = size / 2; size_t tablestep = n / size; size_t *d_halfsize,*d_tablestep,*d_size; int * d_IndexToBeProcessed; hipMalloc((void **)&d_IndexToBeProcessed, sizeof(int)*(n/2)); hipMalloc((void **)&d_halfsize, sizeof(size_t)); hipMalloc((void **)&d_tablestep, sizeof(size_t)); hipMalloc((void **)&d_size, sizeof(size_t)); hipMemcpy(d_halfsize, &halfsize, sizeof(size_t), hipMemcpyHostToDevice); hipMemcpy(d_tablestep, &tablestep, sizeof(size_t), hipMemcpyHostToDevice); hipMemcpy(d_size, &size, sizeof(size_t), hipMemcpyHostToDevice); hipMemcpy(d_IndexToBeProcessed, &IndexToBeProcessed, sizeof(int)*(n/2), hipMemcpyHostToDevice); hipStream_t stream; hipStreamCreate(&stream); hipLaunchKernelGGL(( transformCUDA), dim3(bloques),dim3(hilos),0,stream, d_bloques,d_n,d_size,d_halfsize,d_tablestep,d_vec_pointer,d_expTable_pointer,d_IndexToBeProcessed); hipStreamSynchronize(stream); hipFree(d_halfsize); hipFree(d_tablestep); hipFree(d_size); //for (size_t i = 0; i < n; i += size) { //for (size_t j = i, k = 0; j < i + halfsize; j++, k += tablestep) { //complex<double> temp = vec[j + halfsize] * expTable[k]; //vec[j + halfsize] = vec[j] - temp; //vec[j] += temp; //} //} if (size == n) // Prevent overflow in 'size *= 2' break; } vec = d_vec; gettimeofday(tval_after, NULL); timersub(tval_after, tval_before, tval_result); printf("%ld.%06ld\n", (long int)tval_result->tv_sec, (long int)tval_result->tv_usec); hipFree(d_n); hipFree(d_bloques); } static size_t reverseBits(size_t val, int width) { size_t result = 0; for (int i = 0; i < width; i++, val >>= 1) result = (result << 1) | (val & 1U); return result; } int main(int argc, char *argv[]) { int hilos = atoi(argv[1]); int bloques = atoi(argv[2]); size_t n = 16384; testFft(n,hilos, bloques); return 0; } static void testFft(int n,int hilos,int bloques) { const thrust::host_vector<complex<double> > input = randomComplexes(n); thrust::host_vector<complex<double> > actual = input; //thrust::host_vector<complex<double> > actual (n); //actual[0] = 2; //actual[1] = 3; //actual[2] = -1; //actual[3] = 1; Fft::transform(actual, false,hilos, bloques); //for(int i = 0;i < n;i++){ //printf("%f + %fi\n", real(actual[i]), imag(actual[i])); //} } /*---- Utility functions ----*/ static thrust::host_vector<complex<double> > randomComplexes(int n) { std::uniform_real_distribution<double> valueDist(-1.0, 1.0); thrust::host_vector<complex<double> > result; for (int i = 0; i < n; 
i++) result.push_back(complex<double>(rand()%100, rand()%100)); return result; }
ff0f4ff8b6110ceb93314bfbd58aed93a812d5da.cu
#include <cstddef> #include <cstdint> #include <stdexcept> #include <utility> #include <algorithm> #include <cmath> #include <complex> #include <cstdlib> #include <iomanip> #include <iostream> #include <random> #include <vector> #include <sys/time.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "FftComplex.hpp" using std::complex; using std::cout; using std::endl; using std::vector; using std::size_t; using std::uintmax_t; __global__ void transformCUDA(int *bloques,size_t *n,size_t *size,size_t *halfsize,size_t *tablestep, complex<double> *vec,complex<double> *expTable,int * IndexToBeProcessed){ int index = threadIdx.x + blockIdx.x * blockDim.x; int hilos = blockDim.x * (*bloques); int from = (int)(((*n/2)/hilos)*index); int to = (int)(((*n/2)/hilos)*(index+1))-1; //if (index == hilos-1){ //to = (int)(*n/2 - 1); //} IndexToBeProcessed[0]= 0; for (int x = 1;x < *n/2;x++){ if((IndexToBeProcessed[x-1]+1)% *halfsize==0){ IndexToBeProcessed[x] = IndexToBeProcessed[x-1] + (int)*halfsize + 1; }else{ IndexToBeProcessed[x] = IndexToBeProcessed[x-1] + 1; } //printf("index %d to be processed: %d\n",x,IndexToBeProcessed[x]); if (x == *n/2) break; } for(int j = from;j<=to;j++){ size_t k = *tablestep*(IndexToBeProcessed[j]%*size); size_t l = IndexToBeProcessed[j] + *halfsize; double temp_real = real(vec[l]) * real(expTable[k]) - imag(vec[l]) * imag(expTable[k]); double temp_img = real(vec[l]) * imag(expTable[k]) + imag(vec[l]) * real(expTable[k]); double vec_real = real(vec[IndexToBeProcessed[j]]); double vec_img = imag(vec[IndexToBeProcessed[j]]); complex<double>temp(vec_real-temp_real,vec_img-temp_img); vec[l] = temp; complex<double>temp2(vec_real+temp_real,vec_img+temp_img); vec[IndexToBeProcessed[j]] = temp2; } /*for (size_t i = 0; i < *n; i += *size) { for (size_t j = i, k = 0; j < i + *halfsize; j++, k += *tablestep) { //complex<double> temp = vec[j + *halfsize] * expTable[k]; //vec[j + *halfsize] = vec[j] - temp; //vec[j] += temp; double temp_real = real(vec[j + *halfsize]) * real(expTable[k]) - imag(vec[j + *halfsize]) * imag(expTable[k]); double temp_img = real(vec[j + *halfsize]) * imag(expTable[k]) + imag(vec[j + *halfsize]) * real(expTable[k]); double vec_real = real(vec[j]); double vec_img = imag(vec[j]); complex<double>temp(vec_real-temp_real,vec_img-temp_img); vec[j + *halfsize] = temp; complex<double>temp2(vec_real+temp_real,vec_img+temp_img); vec[j] = temp2; } }*/ } // Private function prototypes static size_t reverseBits(size_t val, int width); static void testFft(int n,int hilos,int bloques); static thrust::host_vector<complex<double> > randomComplexes(int n); void Fft::transform(thrust::host_vector<complex<double> > &vec, bool inverse,int hilos,int bloques) { size_t n = vec.size(); if (n == 0) return; else if ((n & (n - 1)) == 0) // Is power of 2 Fft::transformRadix2(vec, inverse , hilos,bloques); else // More complicated algorithm for arbitrary sizes printf("is not power of 2\n"); } void Fft::transformRadix2(thrust::host_vector<complex<double> > &vec, bool inverse,int hilos,int bloques ) { // Length variables size_t n = vec.size(); int levels = 0; // Compute levels = floor(log2(n)) for (size_t temp = n; temp > 1U; temp >>= 1) levels++; if (static_cast<size_t>(1U) << levels != n) throw std::domain_error("Length is not a power of 2"); // Trigonometric table thrust::host_vector<complex<double> > expTable(n / 2); for (size_t i = 0; i < n / 2; i++) expTable[i] = std::polar(1.0, (inverse ? 
2 : -2) * M_PI * i / n); // Bit-reversed addressing permutation for (size_t i = 0; i < n; i++) { size_t j = reverseBits(i, levels); if (j > i) std::swap(vec[i], vec[j]); } //CUDA int *d_bloques; size_t *d_n; cudaMalloc((void **)&d_bloques, sizeof(int)); cudaMemcpy(d_bloques, &bloques, sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void **)&d_n, sizeof(size_t)); cudaMemcpy(d_n, &n, sizeof(size_t), cudaMemcpyHostToDevice); thrust::device_vector<complex<double> > d_expTable = expTable; thrust::device_vector<complex<double> > d_vec = vec; complex<double> * d_expTable_pointer = thrust::raw_pointer_cast(&d_expTable[0]); complex<double> * d_vec_pointer = thrust::raw_pointer_cast(&d_vec[0]); struct timeval* tval_before,* tval_after,* tval_result; tval_before = (struct timeval*)malloc(sizeof(struct timeval)); tval_after = (struct timeval*)malloc(sizeof(struct timeval)); tval_result = (struct timeval*)malloc(sizeof(struct timeval)); int IndexToBeProcessed [n/2]; // Cooley-Tukey decimation-in-time radix-2 FFT gettimeofday(tval_before, NULL); for (size_t size = 2; size <= n; size *= 2) { size_t halfsize = size / 2; size_t tablestep = n / size; size_t *d_halfsize,*d_tablestep,*d_size; int * d_IndexToBeProcessed; cudaMalloc((void **)&d_IndexToBeProcessed, sizeof(int)*(n/2)); cudaMalloc((void **)&d_halfsize, sizeof(size_t)); cudaMalloc((void **)&d_tablestep, sizeof(size_t)); cudaMalloc((void **)&d_size, sizeof(size_t)); cudaMemcpy(d_halfsize, &halfsize, sizeof(size_t), cudaMemcpyHostToDevice); cudaMemcpy(d_tablestep, &tablestep, sizeof(size_t), cudaMemcpyHostToDevice); cudaMemcpy(d_size, &size, sizeof(size_t), cudaMemcpyHostToDevice); cudaMemcpy(d_IndexToBeProcessed, &IndexToBeProcessed, sizeof(int)*(n/2), cudaMemcpyHostToDevice); cudaStream_t stream; cudaStreamCreate(&stream); transformCUDA<<<bloques,hilos,0,stream>>>(d_bloques,d_n,d_size,d_halfsize,d_tablestep,d_vec_pointer,d_expTable_pointer,d_IndexToBeProcessed); cudaStreamSynchronize(stream); cudaFree(d_halfsize); cudaFree(d_tablestep); cudaFree(d_size); //for (size_t i = 0; i < n; i += size) { //for (size_t j = i, k = 0; j < i + halfsize; j++, k += tablestep) { //complex<double> temp = vec[j + halfsize] * expTable[k]; //vec[j + halfsize] = vec[j] - temp; //vec[j] += temp; //} //} if (size == n) // Prevent overflow in 'size *= 2' break; } vec = d_vec; gettimeofday(tval_after, NULL); timersub(tval_after, tval_before, tval_result); printf("%ld.%06ld\n", (long int)tval_result->tv_sec, (long int)tval_result->tv_usec); cudaFree(d_n); cudaFree(d_bloques); } static size_t reverseBits(size_t val, int width) { size_t result = 0; for (int i = 0; i < width; i++, val >>= 1) result = (result << 1) | (val & 1U); return result; } int main(int argc, char *argv[]) { int hilos = atoi(argv[1]); int bloques = atoi(argv[2]); size_t n = 16384; testFft(n,hilos, bloques); return 0; } static void testFft(int n,int hilos,int bloques) { const thrust::host_vector<complex<double> > input = randomComplexes(n); thrust::host_vector<complex<double> > actual = input; //thrust::host_vector<complex<double> > actual (n); //actual[0] = 2; //actual[1] = 3; //actual[2] = -1; //actual[3] = 1; Fft::transform(actual, false,hilos, bloques); //for(int i = 0;i < n;i++){ //printf("%f + %fi\n", real(actual[i]), imag(actual[i])); //} } /*---- Utility functions ----*/ static thrust::host_vector<complex<double> > randomComplexes(int n) { std::uniform_real_distribution<double> valueDist(-1.0, 1.0); thrust::host_vector<complex<double> > result; for (int i = 0; i < n; i++) 
result.push_back(complex<double>(rand()%100, rand()%100)); return result; }
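For the FFT pair above, a convenient correctness check is to compare the GPU result element-wise against a naive O(n^2) DFT on a small power-of-two input. The reference routine below is not part of the record; it is a generic helper that assumes the same std::complex<double> host data.

// Generic reference DFT (not from the dataset), intended only for validating
// the radix-2 transform on small inputs.
#include <cmath>
#include <complex>
#include <vector>

std::vector<std::complex<double>> naiveDft(
    const std::vector<std::complex<double>> &in, bool inverse) {
  const size_t n = in.size();
  std::vector<std::complex<double>> out(n);
  const double sign = inverse ? 2.0 : -2.0;  // matches the expTable convention above
  for (size_t k = 0; k < n; ++k) {
    std::complex<double> sum(0.0, 0.0);
    for (size_t t = 0; t < n; ++t) {
      double angle = sign * M_PI * double(k * t % n) / double(n);
      sum += in[t] * std::polar(1.0, angle);
    }
    out[k] = sum;
  }
  return out;
}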
7d279010f2cc9d29d9ecd73a4abce579550c13d5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/hypot_impl.cuh"

__constant__ size_t start_cal[5];
__constant__ size_t end_cal[5];
__constant__ size_t output_cal[5];

template <typename T>
struct HypotFunc {
  __device__ __host__ __forceinline__ T operator()(const T &x1, const T &x2) { return hypotf(x1, x2); }
};

template <>
struct HypotFunc<double> {
  __device__ __host__ __forceinline__ double operator()(const double &x1, const double &x2) { return hypot(x1, x2); }
};

template <typename T, typename Func>
__global__ void CalHypotKernel(size_t size, const T *x1, const T *x2, T *y) {
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
    y[pos] = Func()(x1[pos], x2[pos]);
  }
}

__device__ __forceinline__ size_t Index(const size_t &index, const size_t &dim) { return dim == 1 ? 0 : index; }

template <typename T, typename Func>
__global__ void BroadcastHypotKernel(
  const size_t l0, const size_t l1, const size_t l2, const size_t l3, const size_t l4, const size_t l5,
  const size_t l6, const size_t r0, const size_t r1, const size_t r2, const size_t r3, const size_t r4,
  const size_t r5, const size_t r6, const size_t d0, const size_t d1, const size_t d2, const size_t d3,
  const size_t d4, const size_t d5, const size_t d6, const T *x1, const T *x2, T *y) {
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < d0 * d1 * d2 * d3 * d4 * d5 * d6;
       pos += blockDim.x * gridDim.x) {
    size_t i = pos / output_cal[0] % d0;
    size_t j = pos / output_cal[1] % d1;
    size_t k = pos / output_cal[2] % d2;
    size_t l = pos / output_cal[3] % d3;
    size_t m = pos / output_cal[4] % d4;
    size_t n = pos / d6 % d5;
    size_t o = pos % d6;
    size_t l_index = Index(i, l0) * start_cal[0];
    l_index += Index(j, l1) * start_cal[1];
    l_index += Index(k, l2) * start_cal[2];
    l_index += Index(l, l3) * start_cal[3];
    l_index += Index(m, l4) * start_cal[4];
    l_index += Index(n, l5) * l6;
    l_index += Index(o, l6);
    size_t r_index = Index(i, r0) * end_cal[0];
    r_index += Index(j, r1) * end_cal[1];
    r_index += Index(k, r2) * end_cal[2];
    r_index += Index(l, r3) * end_cal[3];
    r_index += Index(m, r4) * end_cal[4];
    r_index += Index(n, r5) * r6;
    r_index += Index(o, r6);
    y[pos] = Func()(x1[l_index], x2[r_index]);
  }
}

template <typename T>
void CalHypot(size_t size, const T *x1, const T *x2, T *y, const uint32_t &device_id, hipStream_t cuda_stream) {
  return hipLaunchKernelGGL((CalHypotKernel<T, HypotFunc<T>>), dim3(CUDA_BLOCKS(device_id, size)),
                            dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, x1, x2, y);
}

void CalShapeData(const std::vector<size_t> &start_shape, size_t *output) {
  output[4] = start_shape[5] * start_shape[6];
  output[3] = output[4] * start_shape[4];
  output[2] = output[3] * start_shape[3];
  output[1] = output[2] * start_shape[2];
  output[0] = output[1] * start_shape[1];
}

template <typename T>
void BroadcastHypot(const std::vector<size_t> &x1_shape, const std::vector<size_t> &x2_shape,
                    const std::vector<size_t> &y_shape, const T *x1, const T *x2, T *y,
                    const uint32_t &device_id, hipStream_t cuda_stream) {
  size_t size = 1;
  for (auto d : y_shape) {
    size *= d;
  }
  size_t start_dim[5];
  size_t end_dim[5];
  size_t output_dim[5];
  CalShapeData(x1_shape, start_dim);
  CalShapeData(x2_shape, end_dim);
  CalShapeData(y_shape, output_dim);
  hipMemcpyToSymbol(start_cal, start_dim, sizeof(size_t) * 5);
  hipMemcpyToSymbol(end_cal, end_dim, sizeof(size_t) * 5);
  hipMemcpyToSymbol(output_cal, output_dim, sizeof(size_t) * 5);
  return hipLaunchKernelGGL((BroadcastHypotKernel<T, HypotFunc<T>>), dim3(CUDA_BLOCKS(device_id, size)),
                            dim3(CUDA_THREADS(device_id)), 0, cuda_stream, x1_shape[0], x1_shape[1],
                            x1_shape[2], x1_shape[3], x1_shape[4], x1_shape[5], x1_shape[6], x2_shape[0],
                            x2_shape[1], x2_shape[2], x2_shape[3], x2_shape[4], x2_shape[5], x2_shape[6],
                            y_shape[0], y_shape[1], y_shape[2], y_shape[3], y_shape[4], y_shape[5],
                            y_shape[6], x1, x2, y);
}

template CUDA_LIB_EXPORT void CalHypot<float>(size_t, const float *, const float *, float *, const uint32_t &,
                                              hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalHypot<double>(size_t, const double *, const double *, double *, const uint32_t &,
                                               hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void BroadcastHypot<float>(const std::vector<size_t> &, const std::vector<size_t> &,
                                                    const std::vector<size_t> &, const float *, const float *,
                                                    float *, const uint32_t &, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void BroadcastHypot<double>(const std::vector<size_t> &, const std::vector<size_t> &,
                                                     const std::vector<size_t> &, const double *, const double *,
                                                     double *, const uint32_t &, hipStream_t cuda_stream);
7d279010f2cc9d29d9ecd73a4abce579550c13d5.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/hypot_impl.cuh" __constant__ size_t start_cal[5]; __constant__ size_t end_cal[5]; __constant__ size_t output_cal[5]; template <typename T> struct HypotFunc { __device__ __host__ __forceinline__ T operator()(const T &x1, const T &x2) { return hypotf(x1, x2); } }; template <> struct HypotFunc<double> { __device__ __host__ __forceinline__ double operator()(const double &x1, const double &x2) { return hypot(x1, x2); } }; template <typename T, typename Func> __global__ void CalHypotKernel(size_t size, const T *x1, const T *x2, T *y) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { y[pos] = Func()(x1[pos], x2[pos]); } } __device__ __forceinline__ size_t Index(const size_t &index, const size_t &dim) { return dim == 1 ? 0 : index; } template <typename T, typename Func> __global__ void BroadcastHypotKernel( const size_t l0, const size_t l1, const size_t l2, const size_t l3, const size_t l4, const size_t l5, const size_t l6, const size_t r0, const size_t r1, const size_t r2, const size_t r3, const size_t r4, const size_t r5, const size_t r6, const size_t d0, const size_t d1, const size_t d2, const size_t d3, const size_t d4, const size_t d5, const size_t d6, const T *x1, const T *x2, T *y) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < d0 * d1 * d2 * d3 * d4 * d5 * d6; pos += blockDim.x * gridDim.x) { size_t i = pos / output_cal[0] % d0; size_t j = pos / output_cal[1] % d1; size_t k = pos / output_cal[2] % d2; size_t l = pos / output_cal[3] % d3; size_t m = pos / output_cal[4] % d4; size_t n = pos / d6 % d5; size_t o = pos % d6; size_t l_index = Index(i, l0) * start_cal[0]; l_index += Index(j, l1) * start_cal[1]; l_index += Index(k, l2) * start_cal[2]; l_index += Index(l, l3) * start_cal[3]; l_index += Index(m, l4) * start_cal[4]; l_index += Index(n, l5) * l6; l_index += Index(o, l6); size_t r_index = Index(i, r0) * end_cal[0]; r_index += Index(j, r1) * end_cal[1]; r_index += Index(k, r2) * end_cal[2]; r_index += Index(l, r3) * end_cal[3]; r_index += Index(m, r4) * end_cal[4]; r_index += Index(n, r5) * r6; r_index += Index(o, r6); y[pos] = Func()(x1[l_index], x2[r_index]); } } template <typename T> void CalHypot(size_t size, const T *x1, const T *x2, T *y, const uint32_t &device_id, cudaStream_t cuda_stream) { return CalHypotKernel<T, HypotFunc<T>> <<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, x1, x2, y); } void CalShapeData(const std::vector<size_t> &start_shape, size_t *output) { output[4] = start_shape[5] * start_shape[6]; output[3] = output[4] * start_shape[4]; output[2] = output[3] * start_shape[3]; output[1] = output[2] * start_shape[2]; output[0] = output[1] * start_shape[1]; } template <typename T> void BroadcastHypot(const std::vector<size_t> &x1_shape, const std::vector<size_t> &x2_shape, const std::vector<size_t> &y_shape, const T *x1, const T 
*x2, T *y, const uint32_t &device_id, cudaStream_t cuda_stream) { size_t size = 1; for (auto d : y_shape) { size *= d; } size_t start_dim[5]; size_t end_dim[5]; size_t output_dim[5]; CalShapeData(x1_shape, start_dim); CalShapeData(x2_shape, end_dim); CalShapeData(y_shape, output_dim); cudaMemcpyToSymbol(start_cal, start_dim, sizeof(size_t) * 5); cudaMemcpyToSymbol(end_cal, end_dim, sizeof(size_t) * 5); cudaMemcpyToSymbol(output_cal, output_dim, sizeof(size_t) * 5); return BroadcastHypotKernel<T, HypotFunc<T>> <<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(x1_shape[0], x1_shape[1], x1_shape[2], x1_shape[3], x1_shape[4], x1_shape[5], x1_shape[6], x2_shape[0], x2_shape[1], x2_shape[2], x2_shape[3], x2_shape[4], x2_shape[5], x2_shape[6], y_shape[0], y_shape[1], y_shape[2], y_shape[3], y_shape[4], y_shape[5], y_shape[6], x1, x2, y); } template CUDA_LIB_EXPORT void CalHypot<float>(size_t, const float *, const float *, float *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalHypot<double>(size_t, const double *, const double *, double *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void BroadcastHypot<float>(const std::vector<size_t> &, const std::vector<size_t> &, const std::vector<size_t> &, const float *, const float *, float *, const uint32_t &, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void BroadcastHypot<double>(const std::vector<size_t> &, const std::vector<size_t> &, const std::vector<size_t> &, const double *, const double *, double *, const uint32_t &, cudaStream_t cuda_stream);
520f65a54eefbe6dfa196728b87fa8fc39c2a380.hip
// !!! This is a file automatically generated by hipify!!! #include "object/material/brick_material.hpp" using namespace px; BaseBrickMaterial::BaseBrickMaterial(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, Light const &ambient_edge, Light const &diffuse_edge, Light const &specular_edge, PREC const &shininessonent_edge, Light const &transmissive_edge, PREC const &refractive_index_edge, PREC const &scale, PREC const &edge_width, PREC const &edge_height) : _ambient(ambient), _diffuse(diffuse), _specular(specular), _shininessonent(shininessonent), _transmissive(transmissive), _refractive_index(refractive_index), _ambient_edge(ambient_edge), _diffuse_edge(diffuse_edge), _specular_edge(specular_edge), _shininessonent_edge(shininessonent_edge), _transmissive_edge(transmissive_edge), _refractive_index_edge(refractive_index_edge), _scale(scale), _edge_width(edge_width), _edge_height(edge_height) {} PX_CUDA_CALLABLE Light BaseBrickMaterial::getAmbient(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_ambient_edge; return o->_ambient; } PX_CUDA_CALLABLE Light BaseBrickMaterial::getDiffuse(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_diffuse_edge; return o->_diffuse; } PX_CUDA_CALLABLE Light BaseBrickMaterial::getSpecular(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_specular_edge; return o->_specular; } PX_CUDA_CALLABLE PREC BaseBrickMaterial::getShininess(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_shininessonent_edge; return o->_shininessonent; } PX_CUDA_CALLABLE Light BaseBrickMaterial::getTransmissive(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_transmissive_edge; return o->_transmissive; } PX_CUDA_CALLABLE PREC BaseBrickMaterial::getRefractiveIndex(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_refractive_index_edge; return o->_refractive_index; } PX_CUDA_CALLABLE bool BaseBrickMaterial::onEdge(PREC const &u, PREC const &v, PREC const &w) const noexcept { auto tx = static_cast<int>(::floor(_scale*u)); auto ty = static_cast<int>(::floor(_scale*v)); return ((std::abs(_scale*u - tx) < _edge_width) && ((tx & 0x0001) == (ty & 0x0001))) || (std::abs(_scale*v - ty) < _edge_height); } void BaseBrickMaterial::setAmbient(Light const &ambient) { _ambient = ambient; } void BaseBrickMaterial::setDiffuse(Light const &diffuse) { _diffuse = diffuse; } void BaseBrickMaterial::setSpecular(Light const &specular) { _specular = specular; } void BaseBrickMaterial::setShininess(PREC const &shininess) { _shininessonent = shininess; } void BaseBrickMaterial::setTransmissive(Light const &transmissive) { _transmissive = transmissive; } void BaseBrickMaterial::setRefractiveIndex(PREC const &ior) { _refractive_index = ior; } void BaseBrickMaterial::setAmbientEdge(Light const &ambient) { _ambient_edge = ambient; } void BaseBrickMaterial::setDiffuseEdge(Light const &diffuse) { _diffuse_edge = 
diffuse; } void BaseBrickMaterial::setSpecularEdge(Light const &specular) { _specular_edge = specular; } void BaseBrickMaterial::setShininessEdge(PREC const &shininess) { _shininessonent_edge = shininess; } void BaseBrickMaterial::setTransmissiveEdge(Light const &transmissive) { _transmissive_edge = transmissive; } void BaseBrickMaterial::setRefractiveIndexEdge(PREC const &ior) { _refractive_index_edge = ior; } void BaseBrickMaterial::setScale(PREC const &scale) { _scale = scale; } void BaseBrickMaterial::setEdgeWidth(PREC const &edge_width) { _edge_width = edge_width; } void BaseBrickMaterial::setEdgeHeight(PREC const &edge_height) { _edge_height = edge_height; } std::shared_ptr<BaseMaterial> BrickMaterial::create(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, Light const &ambient_edge, Light const &diffuse_edge, Light const &specular_edge, PREC const &shininessonent_edge, Light const &transmissive_edge, PREC const &refractive_index_edge, PREC const &scale, PREC const &edge_width, PREC const &edge_height) { return std::shared_ptr<BaseMaterial>(new BrickMaterial(ambient, diffuse, specular, shininessonent, transmissive, refractive_index, ambient_edge, diffuse_edge, specular_edge, shininessonent_edge, transmissive_edge, refractive_index_edge, scale, edge_width, edge_height)); } BrickMaterial::BrickMaterial(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, Light const &ambient_edge, Light const &diffuse_edge, Light const &specular_edge, PREC const &shininessonent_edge, Light const &transmissive_edge, PREC const &refractive_index_edge, PREC const &scale, PREC const &edge_width, PREC const &edge_height) : BaseMaterial(), _obj(new BaseBrickMaterial(ambient, diffuse, specular, shininessonent, transmissive, refractive_index, ambient_edge, diffuse_edge, specular_edge, shininessonent_edge, transmissive_edge, refractive_index_edge, scale, edge_width, edge_height)), _gpu_obj(nullptr), _need_upload(true) {} BrickMaterial::~BrickMaterial() { delete _obj; #ifdef USE_ROCM clearGpuData(); #endif } #ifdef USE_ROCM __device__ fnAmbient_t __fn_ambient_brick_material = BaseBrickMaterial::getAmbient; __device__ fnDiffuse_t __fn_diffuse_brick_material = BaseBrickMaterial::getDiffuse; __device__ fnSpecular_t __fn_specular_brick_material = BaseBrickMaterial::getSpecular; __device__ fnShininess_t __fn_shininess_brick_material = BaseBrickMaterial::getShininess; __device__ fnTransmissive_t __fn_transmissive_brick_material = BaseBrickMaterial::getTransmissive; __device__ fnRefractiveIndex_t __fn_refractive_index_brick_material = BaseBrickMaterial::getRefractiveIndex; #endif void BrickMaterial::up2Gpu() { #ifdef USE_ROCM static fnAmbient_t fn_ambient_h = nullptr; static fnDiffuse_t fn_diffuse_h; static fnSpecular_t fn_specular_h; static fnShininess_t fn_shininess_h; static fnTransmissive_t fn_transmissive_h; static fnRefractiveIndex_t fn_refractive_index_h; if (_need_upload) { if (dev_ptr == nullptr) { PX_CUDA_CHECK(hipMalloc(&_gpu_obj, sizeof(BaseBrickMaterial))); PX_CUDA_CHECK(hipMalloc(&dev_ptr, sizeof(MaterialObj))); } if (fn_ambient_h == nullptr) { PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_ambient_h, __fn_ambient_brick_material, sizeof(fnAmbient_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_diffuse_h, __fn_diffuse_brick_material, sizeof(fnDiffuse_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_specular_h, 
__fn_specular_brick_material, sizeof(fnSpecular_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_shininess_h, __fn_shininess_brick_material, sizeof(fnShininess_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_transmissive_h, __fn_transmissive_brick_material, sizeof(fnTransmissive_t))); PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_refractive_index_h, __fn_refractive_index_brick_material, sizeof(fnRefractiveIndex_t))); } PX_CUDA_CHECK(hipMemcpy(_gpu_obj, _obj, sizeof(BaseBrickMaterial), hipMemcpyHostToDevice)); MaterialObj tmp(_gpu_obj, fn_ambient_h, fn_diffuse_h, fn_specular_h, fn_shininess_h, fn_transmissive_h, fn_refractive_index_h); PX_CUDA_CHECK(hipMemcpy(dev_ptr, &tmp, sizeof(MaterialObj), hipMemcpyHostToDevice)); _need_upload = false; } #endif } void BrickMaterial::clearGpuData() { #ifdef USE_ROCM if (_gpu_obj != nullptr) { PX_CUDA_CHECK(hipFree(_gpu_obj)); _gpu_obj = nullptr; } BaseMaterial::clearGpuData(); #endif } PREC BrickMaterial::Shininess(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getShininess(_obj, u, v, w); } PREC BrickMaterial::refractiveIndex(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getRefractiveIndex(_obj, u, v, w); } Light BrickMaterial::getAmbient(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getAmbient(_obj, u, v, w); } Light BrickMaterial::getDiffuse(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getDiffuse(_obj, u, v, w); } Light BrickMaterial::getSpecular(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getSpecular(_obj, u, v, w); } Light BrickMaterial::getTransmissive(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getTransmissive(_obj, u, v, w); } void BrickMaterial::setAmbient(Light const &ambient) { _obj->setAmbient(ambient); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setDiffuse(Light const &diffuse) { _obj->setDiffuse(diffuse); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setSpecular(Light const &specular) { _obj->setSpecular(specular); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setShininess(PREC const &shininess) { _obj->setShininess(shininess); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setTransmissive(Light const &transmissive) { _obj->setTransmissive(transmissive); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setRefractiveIndex(PREC const &ior) { _obj->setRefractiveIndex(ior); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setAmbientEdge(Light const &ambient) { _obj->setAmbientEdge(ambient); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setDiffuseEdge(Light const &diffuse) { _obj->setDiffuseEdge(diffuse); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setSpecularEdge(Light const &specular) { _obj->setSpecularEdge(specular); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setShininessEdge(PREC const &shininess) { _obj->setShininessEdge(shininess); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setTransmissiveEdge(Light const &transmissive) { _obj->setTransmissiveEdge(transmissive); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setRefractiveIndexEdge(PREC const &ior) { _obj->setRefractiveIndexEdge(ior); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setScale(PREC const &scale) { _obj->setScale(scale); #ifdef USE_ROCM _need_upload = true; 
#endif } void BrickMaterial::setEdgeWidth(PREC const &edge_width) { _obj->setEdgeWidth(edge_width); #ifdef USE_ROCM _need_upload = true; #endif } void BrickMaterial::setEdgeHeight(PREC const &edge_height) { _obj->setEdgeHeight(edge_height); #ifdef USE_ROCM _need_upload = true; #endif }
520f65a54eefbe6dfa196728b87fa8fc39c2a380.cu
#include "object/material/brick_material.hpp" using namespace px; BaseBrickMaterial::BaseBrickMaterial(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, Light const &ambient_edge, Light const &diffuse_edge, Light const &specular_edge, PREC const &shininessonent_edge, Light const &transmissive_edge, PREC const &refractive_index_edge, PREC const &scale, PREC const &edge_width, PREC const &edge_height) : _ambient(ambient), _diffuse(diffuse), _specular(specular), _shininessonent(shininessonent), _transmissive(transmissive), _refractive_index(refractive_index), _ambient_edge(ambient_edge), _diffuse_edge(diffuse_edge), _specular_edge(specular_edge), _shininessonent_edge(shininessonent_edge), _transmissive_edge(transmissive_edge), _refractive_index_edge(refractive_index_edge), _scale(scale), _edge_width(edge_width), _edge_height(edge_height) {} PX_CUDA_CALLABLE Light BaseBrickMaterial::getAmbient(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_ambient_edge; return o->_ambient; } PX_CUDA_CALLABLE Light BaseBrickMaterial::getDiffuse(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_diffuse_edge; return o->_diffuse; } PX_CUDA_CALLABLE Light BaseBrickMaterial::getSpecular(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_specular_edge; return o->_specular; } PX_CUDA_CALLABLE PREC BaseBrickMaterial::getShininess(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_shininessonent_edge; return o->_shininessonent; } PX_CUDA_CALLABLE Light BaseBrickMaterial::getTransmissive(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_transmissive_edge; return o->_transmissive; } PX_CUDA_CALLABLE PREC BaseBrickMaterial::getRefractiveIndex(void *const &obj, PREC const &u, PREC const &v, PREC const &w) { auto o = reinterpret_cast<BaseBrickMaterial*>(obj); if (o->onEdge(u, v, w)) return o->_refractive_index_edge; return o->_refractive_index; } PX_CUDA_CALLABLE bool BaseBrickMaterial::onEdge(PREC const &u, PREC const &v, PREC const &w) const noexcept { auto tx = static_cast<int>(std::floor(_scale*u)); auto ty = static_cast<int>(std::floor(_scale*v)); return ((std::abs(_scale*u - tx) < _edge_width) && ((tx & 0x0001) == (ty & 0x0001))) || (std::abs(_scale*v - ty) < _edge_height); } void BaseBrickMaterial::setAmbient(Light const &ambient) { _ambient = ambient; } void BaseBrickMaterial::setDiffuse(Light const &diffuse) { _diffuse = diffuse; } void BaseBrickMaterial::setSpecular(Light const &specular) { _specular = specular; } void BaseBrickMaterial::setShininess(PREC const &shininess) { _shininessonent = shininess; } void BaseBrickMaterial::setTransmissive(Light const &transmissive) { _transmissive = transmissive; } void BaseBrickMaterial::setRefractiveIndex(PREC const &ior) { _refractive_index = ior; } void BaseBrickMaterial::setAmbientEdge(Light const &ambient) { _ambient_edge = ambient; } void BaseBrickMaterial::setDiffuseEdge(Light const &diffuse) { _diffuse_edge = diffuse; } void BaseBrickMaterial::setSpecularEdge(Light 
const &specular) { _specular_edge = specular; } void BaseBrickMaterial::setShininessEdge(PREC const &shininess) { _shininessonent_edge = shininess; } void BaseBrickMaterial::setTransmissiveEdge(Light const &transmissive) { _transmissive_edge = transmissive; } void BaseBrickMaterial::setRefractiveIndexEdge(PREC const &ior) { _refractive_index_edge = ior; } void BaseBrickMaterial::setScale(PREC const &scale) { _scale = scale; } void BaseBrickMaterial::setEdgeWidth(PREC const &edge_width) { _edge_width = edge_width; } void BaseBrickMaterial::setEdgeHeight(PREC const &edge_height) { _edge_height = edge_height; } std::shared_ptr<BaseMaterial> BrickMaterial::create(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, Light const &ambient_edge, Light const &diffuse_edge, Light const &specular_edge, PREC const &shininessonent_edge, Light const &transmissive_edge, PREC const &refractive_index_edge, PREC const &scale, PREC const &edge_width, PREC const &edge_height) { return std::shared_ptr<BaseMaterial>(new BrickMaterial(ambient, diffuse, specular, shininessonent, transmissive, refractive_index, ambient_edge, diffuse_edge, specular_edge, shininessonent_edge, transmissive_edge, refractive_index_edge, scale, edge_width, edge_height)); } BrickMaterial::BrickMaterial(Light const &ambient, Light const &diffuse, Light const &specular, PREC const &shininessonent, Light const &transmissive, PREC const &refractive_index, Light const &ambient_edge, Light const &diffuse_edge, Light const &specular_edge, PREC const &shininessonent_edge, Light const &transmissive_edge, PREC const &refractive_index_edge, PREC const &scale, PREC const &edge_width, PREC const &edge_height) : BaseMaterial(), _obj(new BaseBrickMaterial(ambient, diffuse, specular, shininessonent, transmissive, refractive_index, ambient_edge, diffuse_edge, specular_edge, shininessonent_edge, transmissive_edge, refractive_index_edge, scale, edge_width, edge_height)), _gpu_obj(nullptr), _need_upload(true) {} BrickMaterial::~BrickMaterial() { delete _obj; #ifdef USE_CUDA clearGpuData(); #endif } #ifdef USE_CUDA __device__ fnAmbient_t __fn_ambient_brick_material = BaseBrickMaterial::getAmbient; __device__ fnDiffuse_t __fn_diffuse_brick_material = BaseBrickMaterial::getDiffuse; __device__ fnSpecular_t __fn_specular_brick_material = BaseBrickMaterial::getSpecular; __device__ fnShininess_t __fn_shininess_brick_material = BaseBrickMaterial::getShininess; __device__ fnTransmissive_t __fn_transmissive_brick_material = BaseBrickMaterial::getTransmissive; __device__ fnRefractiveIndex_t __fn_refractive_index_brick_material = BaseBrickMaterial::getRefractiveIndex; #endif void BrickMaterial::up2Gpu() { #ifdef USE_CUDA static fnAmbient_t fn_ambient_h = nullptr; static fnDiffuse_t fn_diffuse_h; static fnSpecular_t fn_specular_h; static fnShininess_t fn_shininess_h; static fnTransmissive_t fn_transmissive_h; static fnRefractiveIndex_t fn_refractive_index_h; if (_need_upload) { if (dev_ptr == nullptr) { PX_CUDA_CHECK(cudaMalloc(&_gpu_obj, sizeof(BaseBrickMaterial))); PX_CUDA_CHECK(cudaMalloc(&dev_ptr, sizeof(MaterialObj))); } if (fn_ambient_h == nullptr) { PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_ambient_h, __fn_ambient_brick_material, sizeof(fnAmbient_t))); PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_diffuse_h, __fn_diffuse_brick_material, sizeof(fnDiffuse_t))); PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_specular_h, __fn_specular_brick_material, sizeof(fnSpecular_t))); 
PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_shininess_h, __fn_shininess_brick_material, sizeof(fnShininess_t))); PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_transmissive_h, __fn_transmissive_brick_material, sizeof(fnTransmissive_t))); PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_refractive_index_h, __fn_refractive_index_brick_material, sizeof(fnRefractiveIndex_t))); } PX_CUDA_CHECK(cudaMemcpy(_gpu_obj, _obj, sizeof(BaseBrickMaterial), cudaMemcpyHostToDevice)); MaterialObj tmp(_gpu_obj, fn_ambient_h, fn_diffuse_h, fn_specular_h, fn_shininess_h, fn_transmissive_h, fn_refractive_index_h); PX_CUDA_CHECK(cudaMemcpy(dev_ptr, &tmp, sizeof(MaterialObj), cudaMemcpyHostToDevice)); _need_upload = false; } #endif } void BrickMaterial::clearGpuData() { #ifdef USE_CUDA if (_gpu_obj != nullptr) { PX_CUDA_CHECK(cudaFree(_gpu_obj)); _gpu_obj = nullptr; } BaseMaterial::clearGpuData(); #endif } PREC BrickMaterial::Shininess(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getShininess(_obj, u, v, w); } PREC BrickMaterial::refractiveIndex(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getRefractiveIndex(_obj, u, v, w); } Light BrickMaterial::getAmbient(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getAmbient(_obj, u, v, w); } Light BrickMaterial::getDiffuse(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getDiffuse(_obj, u, v, w); } Light BrickMaterial::getSpecular(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getSpecular(_obj, u, v, w); } Light BrickMaterial::getTransmissive(PREC const &u, PREC const &v, PREC const &w) const { return BaseBrickMaterial::getTransmissive(_obj, u, v, w); } void BrickMaterial::setAmbient(Light const &ambient) { _obj->setAmbient(ambient); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setDiffuse(Light const &diffuse) { _obj->setDiffuse(diffuse); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setSpecular(Light const &specular) { _obj->setSpecular(specular); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setShininess(PREC const &shininess) { _obj->setShininess(shininess); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setTransmissive(Light const &transmissive) { _obj->setTransmissive(transmissive); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setRefractiveIndex(PREC const &ior) { _obj->setRefractiveIndex(ior); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setAmbientEdge(Light const &ambient) { _obj->setAmbientEdge(ambient); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setDiffuseEdge(Light const &diffuse) { _obj->setDiffuseEdge(diffuse); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setSpecularEdge(Light const &specular) { _obj->setSpecularEdge(specular); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setShininessEdge(PREC const &shininess) { _obj->setShininessEdge(shininess); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setTransmissiveEdge(Light const &transmissive) { _obj->setTransmissiveEdge(transmissive); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setRefractiveIndexEdge(PREC const &ior) { _obj->setRefractiveIndexEdge(ior); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setScale(PREC const &scale) { _obj->setScale(scale); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setEdgeWidth(PREC 
const &edge_width) { _obj->setEdgeWidth(edge_width); #ifdef USE_CUDA _need_upload = true; #endif } void BrickMaterial::setEdgeHeight(PREC const &edge_height) { _obj->setEdgeHeight(edge_height); #ifdef USE_CUDA _need_upload = true; #endif }
d620158bd4f0f482a23da1d1f343757c1b94438e.hip
// !!! This is a file automatically generated by hipify!!!
#include "mmul.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <random>
#include <rocblas.h>
#include <hipblas.h> // declares hipblasHandle_t/hipblasCreate/hipblasDestroy used below

int main(int argc, char *argv[]) {
  // obtain commandline input
  int n = atol(argv[1]);
  int n_tests = atol(argv[2]);

  // set up random number from -1 to 1 generator
  std::random_device entropy_source;
  std::mt19937_64 generator(entropy_source());
  const float min = -1.0, max = 1.0; // The range for the random number
  std::uniform_real_distribution<float> dist(min, max);

  // allocate array
  float *a, *b, *c;
  hipMallocManaged((void **)&a, sizeof(float) * n * n);
  hipMallocManaged((void **)&b, sizeof(float) * n * n);
  hipMallocManaged((void **)&c, sizeof(float) * n * n);

  // insert random initial value into it
  for (int i = 0; i < n * n; i++) {
    a[i] = dist(generator);
    b[i] = dist(generator);
    c[i] = dist(generator);
  }

  // setup the use of cublas
  // Create a handle for CUBLAS
  hipblasHandle_t handle;
  hipblasCreate(&handle);

  /// time for the operations.
  // set up timer
  hipEvent_t start;
  hipEvent_t stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);

  hipEventRecord(start);
  for (int i = 0; i < n_tests; i++) {
    mmul(handle, a, b, c, n);
  }
  hipEventRecord(stop);
  hipEventSynchronize(stop);

  // Get the elapsed time in milliseconds
  float ms;
  hipEventElapsedTime(&ms, start, stop);
  float averageTime = ms/n_tests;
  printf("%f\n", averageTime);

  //clean up everything
  hipblasDestroy(handle);
  hipFree(a);
  hipFree(b);
  hipFree(c);
}
d620158bd4f0f482a23da1d1f343757c1b94438e.cu
#include "mmul.h" #include <cuda.h> #include <stdio.h> #include <random> #include <cublas_v2.h> int main(int argc, char *argv[]) { // obtain commandline input int n = atol(argv[1]); int n_tests = atol(argv[2]); // set up random number from -1 to 1 generator std::random_device entropy_source; std::mt19937_64 generator(entropy_source()); const float min = -1.0, max = 1.0; // The range for the random number std::uniform_real_distribution<float> dist(min, max); // allocate array float *a, *b, *c; cudaMallocManaged((void **)&a, sizeof(float) * n * n); cudaMallocManaged((void **)&b, sizeof(float) * n * n); cudaMallocManaged((void **)&c, sizeof(float) * n * n); // insert random initial value into it for (int i = 0; i < n * n; i++) { a[i] = dist(generator); b[i] = dist(generator); c[i] = dist(generator); } // setup the use of cublas // Create a handle for CUBLAS cublasHandle_t handle; cublasCreate(&handle); /// time for the operations. // set up timer cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); for (int i = 0; i < n_tests; i++) { mmul(handle, a, b, c, n); } cudaEventRecord(stop); cudaEventSynchronize(stop); // Get the elapsed time in milliseconds float ms; cudaEventElapsedTime(&ms, start, stop); float averageTime = ms/n_tests; printf("%f\n", averageTime); //clean up everything cublasDestroy(handle); cudaFree(a); cudaFree(b); cudaFree(c); }
3f912ea10b43f1709cd8bcb535a501c4c9798877.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * \file dnn/src/cuda/rotate/rotate.cu
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "./rotate.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"

namespace megdnn {
namespace cuda {

static const int BX = 8;
static const int BY = 8;

namespace {

#define rep(i, n) for (size_t i = 0; i < (n); ++i)

template <typename T, bool clockwise, size_t IC>
__global__ void rotate_kern(const T* src, T* dst, size_t N, size_t IH,
                            size_t IW, size_t istride0, size_t istride1,
                            size_t istride2, size_t OH, size_t OW,
                            size_t ostride0, size_t ostride1, size_t ostride2) {
    int iw = blockIdx.x * blockDim.x + threadIdx.x;
    int ih = blockIdx.y * blockDim.y + threadIdx.y;
    if (iw < IW && ih < IH) {
        int ow = clockwise ? IH - ih - 1 : ih;
        int oh = clockwise ? iw : IW - iw - 1;
#pragma unroll
        rep(c, IC) {
            dst[blockIdx.z * ostride0 + oh * ostride1 + ow * ostride2 + c] =
                    src[blockIdx.z * istride0 + ih * istride1 + iw * istride2 + c];
        }
    }
}

#undef rep

} // anonymous namespace

namespace rotate {

template <typename T, bool clockwise>
void rotate(const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH,
            size_t istride0, size_t istride1, size_t istride2, size_t OH,
            size_t OW, size_t ostride0, size_t ostride1, size_t ostride2,
            hipStream_t stream) {
    dim3 threads(BX, BY);
    dim3 blocks(DIVUP(IW, BX), DIVUP(IH, BY), N);
    megdnn_assert(CH == 1 || CH == 3);
    if (CH == 1)
        hipLaunchKernelGGL(( rotate_kern<T, clockwise, 1>), dim3(blocks), dim3(threads), 0, stream,
                src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW,
                ostride0, ostride1, ostride2);
    else
        hipLaunchKernelGGL(( rotate_kern<T, clockwise, 3>), dim3(blocks), dim3(threads), 0, stream,
                src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW,
                ostride0, ostride1, ostride2);
    after_kernel_launch();
}

#define INST(T, clockwise)                                                    \
    template void rotate<T, clockwise>(                                      \
            const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH, \
            size_t istride0, size_t istride1, size_t istride2, size_t OH,    \
            size_t OW, size_t ostride0, size_t ostride1, size_t ostride2,    \
            hipStream_t stream);

#define cb(DType)                                 \
    INST(typename DTypeTrait<DType>::ctype, true) \
    INST(typename DTypeTrait<DType>::ctype, false)

MEGDNN_FOREACH_COMPUTING_DTYPE(cb)

#undef cb
#undef INST

} // namespace rotate
} // namespace cuda
} // namespace megdnn

// vim: syntax=cpp.doxygen
3f912ea10b43f1709cd8bcb535a501c4c9798877.cu
/**
 * \file dnn/src/cuda/rotate/rotate.cu
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "./rotate.cuh"
#include "megdnn/dtype.h"
#include "src/cuda/utils.cuh"

namespace megdnn {
namespace cuda {

static const int BX = 8;
static const int BY = 8;

namespace {

#define rep(i, n) for (size_t i = 0; i < (n); ++i)

template <typename T, bool clockwise, size_t IC>
__global__ void rotate_kern(const T* src, T* dst, size_t N, size_t IH,
                            size_t IW, size_t istride0, size_t istride1,
                            size_t istride2, size_t OH, size_t OW,
                            size_t ostride0, size_t ostride1, size_t ostride2) {
    int iw = blockIdx.x * blockDim.x + threadIdx.x;
    int ih = blockIdx.y * blockDim.y + threadIdx.y;
    if (iw < IW && ih < IH) {
        int ow = clockwise ? IH - ih - 1 : ih;
        int oh = clockwise ? iw : IW - iw - 1;
#pragma unroll
        rep(c, IC) {
            dst[blockIdx.z * ostride0 + oh * ostride1 + ow * ostride2 + c] =
                    src[blockIdx.z * istride0 + ih * istride1 + iw * istride2 + c];
        }
    }
}

#undef rep

} // anonymous namespace

namespace rotate {

template <typename T, bool clockwise>
void rotate(const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH,
            size_t istride0, size_t istride1, size_t istride2, size_t OH,
            size_t OW, size_t ostride0, size_t ostride1, size_t ostride2,
            cudaStream_t stream) {
    dim3 threads(BX, BY);
    dim3 blocks(DIVUP(IW, BX), DIVUP(IH, BY), N);
    megdnn_assert(CH == 1 || CH == 3);
    if (CH == 1)
        rotate_kern<T, clockwise, 1><<<blocks, threads, 0, stream>>>(
                src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW,
                ostride0, ostride1, ostride2);
    else
        rotate_kern<T, clockwise, 3><<<blocks, threads, 0, stream>>>(
                src, dst, N, IH, IW, istride0, istride1, istride2, OH, OW,
                ostride0, ostride1, ostride2);
    after_kernel_launch();
}

#define INST(T, clockwise)                                                    \
    template void rotate<T, clockwise>(                                      \
            const T* src, T* dst, size_t N, size_t IH, size_t IW, size_t CH, \
            size_t istride0, size_t istride1, size_t istride2, size_t OH,    \
            size_t OW, size_t ostride0, size_t ostride1, size_t ostride2,    \
            cudaStream_t stream);

#define cb(DType)                                 \
    INST(typename DTypeTrait<DType>::ctype, true) \
    INST(typename DTypeTrait<DType>::ctype, false)

MEGDNN_FOREACH_COMPUTING_DTYPE(cb)

#undef cb
#undef INST

} // namespace rotate
} // namespace cuda
} // namespace megdnn

// vim: syntax=cpp.doxygen
9d31bac895cea136aa72581873da1833c9523bfa.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/opencv.hpp> #include <thrust/window_2d.h> using namespace cv; #define KERNEL_LENGTH 5 __constant__ float c_kernel[KERNEL_LENGTH*KERNEL_LENGTH]; inline float gauss(int x, int y, int mid, float sigma ) { float temp = (pow(x-mid,2)+pow(y-mid,2))/sigma; temp= exp(-temp); return temp; } void getGaussianKernelBlock(int dim, float sigma,float *GaussianKernel ) { assert(dim%2); int mid = (dim-1)/2; float total = 0; for(int i = 0; i<dim;i++) { for(int j = 0; j<dim;j++) { total+=gauss(i,j,mid,sigma); (GaussianKernel)[i*dim + j]=gauss(i,j,mid,sigma); } } float newTotal=0; for(int i = 0; i<dim;i++) { for(int j = 0; j<dim;j++) { (GaussianKernel)[i*dim + j]/=total; newTotal += (GaussianKernel)[i*dim + j]; } } } class convolutionFunctor //:public thrust::shared_unary_window_transform_functor<uchar> { public: int dim; convolutionFunctor(int dim) { this->dim =dim; } __device__ uchar operator() (const thrust::window_2d<uchar> & input_window,const thrust::window_2d<uchar> & output_window) const { uchar temp = 0; for(int i = 0; i< dim; i++) { for(int j = 0; j<dim; j++) { temp+=input_window[make_int2(j,i)]*(c_kernel)[i*dim + j]; } } output_window[1][1]=temp; return 0; } }; int main(int argc, char const *argv[]) { hipDeviceProp_t dev_prop; hipGetDeviceProperties(&dev_prop,0); Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE); Mat image; int dim = 5; int dim_image = 512; if(argc ==2) { dim_image = atoi(argv[1]); } resize(small,image,Size(dim_image,dim_image)); float *hkernel = (float *) std::malloc(sizeof(float) * dim*dim); getGaussianKernelBlock(dim,5,hkernel); hipMemcpyToSymbol(c_kernel, hkernel, dim*dim * sizeof(float)); thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows); thrust::block_2d<uchar> output_image_block(image.cols,image.rows); thrust::block_2d<uchar> null_block (image.cols,image.rows); uchar * img = (uchar * )malloc(sizeof(uchar)*(uchar_image_block.end()-uchar_image_block.begin())); for(int i = 0; i<image.cols*image.rows;i++) { img[i]=(uchar)image.ptr()[i]; } uchar_image_block.upload(img); thrust::window_vector<uchar> input_wv(&uchar_image_block,dim,dim,1,1); thrust::window_vector<uchar> output_wv(&output_image_block,dim,dim,1,1); thrust::transform(input_wv.begin(),input_wv.end(),output_wv.begin(),null_block.begin(),convolutionFunctor(dim)); unsigned char * toutputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin())); output_image_block.download(&img); for(int i = 0; i<image.cols*image.rows;i++) { toutputFloatImageData[i]=(unsigned char)img[i]; } Mat output (Size(image.cols,image.rows),CV_8UC1,toutputFloatImageData); #ifdef OWRITE imwrite("input.png",image); imwrite("output.png",output); #endif #ifdef SHOW imshow("input.png",image); imshow("output.png",output); waitKey(0); #endif return 0; }
9d31bac895cea136aa72581873da1833c9523bfa.cu
#include <opencv2/opencv.hpp> #include <thrust/window_2d.h> using namespace cv; #define KERNEL_LENGTH 5 __constant__ float c_kernel[KERNEL_LENGTH*KERNEL_LENGTH]; inline float gauss(int x, int y, int mid, float sigma ) { float temp = (pow(x-mid,2)+pow(y-mid,2))/sigma; temp= exp(-temp); return temp; } void getGaussianKernelBlock(int dim, float sigma,float *GaussianKernel ) { assert(dim%2); int mid = (dim-1)/2; float total = 0; for(int i = 0; i<dim;i++) { for(int j = 0; j<dim;j++) { total+=gauss(i,j,mid,sigma); (GaussianKernel)[i*dim + j]=gauss(i,j,mid,sigma); } } float newTotal=0; for(int i = 0; i<dim;i++) { for(int j = 0; j<dim;j++) { (GaussianKernel)[i*dim + j]/=total; newTotal += (GaussianKernel)[i*dim + j]; } } } class convolutionFunctor //:public thrust::shared_unary_window_transform_functor<uchar> { public: int dim; convolutionFunctor(int dim) { this->dim =dim; } __device__ uchar operator() (const thrust::window_2d<uchar> & input_window,const thrust::window_2d<uchar> & output_window) const { uchar temp = 0; for(int i = 0; i< dim; i++) { for(int j = 0; j<dim; j++) { temp+=input_window[make_int2(j,i)]*(c_kernel)[i*dim + j]; } } output_window[1][1]=temp; return 0; } }; int main(int argc, char const *argv[]) { cudaDeviceProp dev_prop; cudaGetDeviceProperties(&dev_prop,0); Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE); Mat image; int dim = 5; int dim_image = 512; if(argc ==2) { dim_image = atoi(argv[1]); } resize(small,image,Size(dim_image,dim_image)); float *hkernel = (float *) std::malloc(sizeof(float) * dim*dim); getGaussianKernelBlock(dim,5,hkernel); cudaMemcpyToSymbol(c_kernel, hkernel, dim*dim * sizeof(float)); thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows); thrust::block_2d<uchar> output_image_block(image.cols,image.rows); thrust::block_2d<uchar> null_block (image.cols,image.rows); uchar * img = (uchar * )malloc(sizeof(uchar)*(uchar_image_block.end()-uchar_image_block.begin())); for(int i = 0; i<image.cols*image.rows;i++) { img[i]=(uchar)image.ptr()[i]; } uchar_image_block.upload(img); thrust::window_vector<uchar> input_wv(&uchar_image_block,dim,dim,1,1); thrust::window_vector<uchar> output_wv(&output_image_block,dim,dim,1,1); thrust::transform(input_wv.begin(),input_wv.end(),output_wv.begin(),null_block.begin(),convolutionFunctor(dim)); unsigned char * toutputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(uchar_image_block.end()-uchar_image_block.begin())); output_image_block.download(&img); for(int i = 0; i<image.cols*image.rows;i++) { toutputFloatImageData[i]=(unsigned char)img[i]; } Mat output (Size(image.cols,image.rows),CV_8UC1,toutputFloatImageData); #ifdef OWRITE imwrite("input.png",image); imwrite("output.png",output); #endif #ifdef SHOW imshow("input.png",image); imshow("output.png",output); waitKey(0); #endif return 0; }
2084392c3fbfceb1018e718ac86335329637cbd0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "implicitFunc.h"

#include <cstdio>
#include <cassert>

#include "defines.hpp"
#include "cudaUtils.hpp"

__device__ __host__ float d1(float x, float y, float z) { return x; }
__device__ __host__ float d2(float x, float y, float z) { return y; }
__device__ __host__ float d3(float x, float y, float z) { return z; }

__device__ __host__ float op1(float d1, float d2) { return d1+d2; }
__device__ __host__ float op2(float d1, float d2) { return d1-d2; }

const unsigned int nDensityFunctions = 3u;
const unsigned int nOperatorFunctions = 2u;

const densityFunction density_functions_p_h[nDensityFunctions] = {d1,d2,d3};
const operatorFunction operator_functions_p_h[nOperatorFunctions] = {op1,op2};

__device__ densityFunction density_functions_p_d[nDensityFunctions];
__device__ operatorFunction operator_functions_p_d[nOperatorFunctions];

__global__ void initPointersKernel() {
    density_functions_p_d[0] = d1;
    density_functions_p_d[1] = d2;
    density_functions_p_d[2] = d3;

    operator_functions_p_d[0] = op1;
    operator_functions_p_d[1] = op2;
}

std::map<operatorFunction, operatorFunction> operatorFunctionPointers;
std::map<densityFunction, densityFunction> densityFunctionPointers;

__host__ void initPointers() {
    //densities
    densityFunction density_functions_p_d_h[nDensityFunctions] = {d1,d2,d3};

    PRINTD("Init pointers kernel :\n");
    hipLaunchKernelGGL(( initPointersKernel), dim3(1),dim3(1), 0, 0, );
    checkKernelExecution();

    CHECK_CUDA_ERRORS(hipMemcpyFromSymbol(
        &density_functions_p_d_h, density_functions_p_d,
        nDensityFunctions*sizeof(densityFunction), 0,hipMemcpyDeviceToHost));

    PRINTD("Densities :\n");
    for (unsigned int i = 0u; i < nDensityFunctions; i++) {
        densityFunctionPointers.insert(
            std::pair<densityFunction,densityFunction>(density_functions_p_h[i], density_functions_p_d_h[i])
        );
        PRINTD("\tDensity %i: %p \t %p\n", i, density_functions_p_h[i], density_functions_p_d_h[i]);
    }

    //operators
    operatorFunction operator_functions_p_d_h[nOperatorFunctions];

    CHECK_CUDA_ERRORS(hipMemcpyFromSymbol(
        operator_functions_p_d_h, operator_functions_p_d,
        nOperatorFunctions*sizeof(operatorFunction), 0,hipMemcpyDeviceToHost));

    PRINTD("Operators :\n");
    for (unsigned int i = 0u; i < nOperatorFunctions; i++) {
        operatorFunctionPointers.insert(
            std::pair<operatorFunction,operatorFunction>(operator_functions_p_h[i], operator_functions_p_d_h[i])
        );
        PRINTD("\tOperator %i: %p \t %p\n", i, operator_functions_p_h[i], operator_functions_p_d_h[i]);
    }
}
2084392c3fbfceb1018e718ac86335329637cbd0.cu
#include "implicitFunc.h" #include <cstdio> #include <cassert> #include "defines.hpp" #include "cudaUtils.hpp" __device__ __host__ float d1(float x, float y, float z) { return x; } __device__ __host__ float d2(float x, float y, float z) { return y; } __device__ __host__ float d3(float x, float y, float z) { return z; } __device__ __host__ float op1(float d1, float d2) { return d1+d2; } __device__ __host__ float op2(float d1, float d2) { return d1-d2; } const unsigned int nDensityFunctions = 3u; const unsigned int nOperatorFunctions = 2u; const densityFunction density_functions_p_h[nDensityFunctions] = {d1,d2,d3}; const operatorFunction operator_functions_p_h[nOperatorFunctions] = {op1,op2}; __device__ densityFunction density_functions_p_d[nDensityFunctions]; __device__ operatorFunction operator_functions_p_d[nOperatorFunctions]; __global__ void initPointersKernel() { density_functions_p_d[0] = d1; density_functions_p_d[1] = d2; density_functions_p_d[2] = d3; operator_functions_p_d[0] = op1; operator_functions_p_d[1] = op2; } std::map<operatorFunction, operatorFunction> operatorFunctionPointers; std::map<densityFunction, densityFunction> densityFunctionPointers; __host__ void initPointers() { //densities densityFunction density_functions_p_d_h[nDensityFunctions] = {d1,d2,d3}; PRINTD("Init pointers kernel :\n"); initPointersKernel<<<1,1>>>(); checkKernelExecution(); CHECK_CUDA_ERRORS(cudaMemcpyFromSymbol( &density_functions_p_d_h, density_functions_p_d, nDensityFunctions*sizeof(densityFunction), 0,cudaMemcpyDeviceToHost)); PRINTD("Densities :\n"); for (unsigned int i = 0u; i < nDensityFunctions; i++) { densityFunctionPointers.insert( std::pair<densityFunction,densityFunction>(density_functions_p_h[i], density_functions_p_d_h[i]) ); PRINTD("\tDensity %i: %p \t %p\n", i, density_functions_p_h[i], density_functions_p_d_h[i]); } //operators operatorFunction operator_functions_p_d_h[nOperatorFunctions]; CHECK_CUDA_ERRORS(cudaMemcpyFromSymbol( operator_functions_p_d_h, operator_functions_p_d, nOperatorFunctions*sizeof(operatorFunction), 0,cudaMemcpyDeviceToHost)); PRINTD("Operators :\n"); for (unsigned int i = 0u; i < nOperatorFunctions; i++) { operatorFunctionPointers.insert( std::pair<operatorFunction,operatorFunction>(operator_functions_p_h[i], operator_functions_p_d_h[i]) ); PRINTD("\tOperator %i: %p \t %p\n", i, operator_functions_p_h[i], operator_functions_p_d_h[i]); } }
157ad18d66026d7115a6cf8ec26d30c2621ca20b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <math.h>
#include <float.h>

#include "top_pooling_cuda_kernel.h"

#define BLOCK 512

dim3 cuda_gridsize(int n)
{
    int k = (n-1) / BLOCK + 1;
    int x = k;
    int y = 1;
    if(x > 65535){
        x = ceil(sqrt(k));
        y = (n-1)/(x*BLOCK) + 1;
    }
    dim3 d(x, y, 1);
    //printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
    return d;
}

__global__ void top_pooling_forward_kernel(int N, float const *x, int w, int h, int c, int batch, float *offset, float *forward_ind, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= N) return;
    int in_index = i;
    int in_w = i%w;
    i = i/w;
    int in_h = i%h;
    i = i/h;
    int in_c = i%c;
    i = i/c;
    int b = i%batch;

    int sp = in_w + w*(in_h + h*b);
    sp = in_h+int(offset[sp]);
    if(sp > h) sp = h;

    int max_ind = in_index;
    int out_index = 0;
    for(int ind=in_h;ind<sp;++ind)
    {
        out_index = in_w + w*(ind + h*(in_c + c*b));
        if(x[max_ind] < x[out_index])
            max_ind = out_index;
    }
    out[in_index] = x[max_ind];
    forward_ind[in_index] = max_ind;
}

__global__ void top_pooling_backward_kernel(int N, float const *x, int w, int h, int c, int batch, float *backward_ind, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= N) return;
    int max_ind = backward_ind[i];
    atomicAdd(&out[max_ind],x[i]);
}

void top_pooling_forward_ongpu(float *x, int w, int h, int c, int batch, float *offset, float *forward_ind, float *out)
{
    int size = w*h*c*batch;
    hipError_t err;

    hipLaunchKernelGGL(( top_pooling_forward_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, offset, forward_ind, out);

    err = hipGetLastError();
    if(hipSuccess != err)
    {
        fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
        exit( -1 );
    }
}

void top_pooling_backward_ongpu(float *x, int w, int h, int c, int batch, float *backward_ind, float *out)
{
    int size = w*h*c*batch;
    hipError_t err;

    hipLaunchKernelGGL(( top_pooling_backward_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, size, x, w, h, c, batch, backward_ind, out);

    err = hipGetLastError();
    if(hipSuccess != err)
    {
        fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
        exit( -1 );
    }
}

#ifdef __cplusplus
}
#endif
157ad18d66026d7115a6cf8ec26d30c2621ca20b.cu
#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <math.h>
#include <float.h>

#include "top_pooling_cuda_kernel.h"

#define BLOCK 512

dim3 cuda_gridsize(int n)
{
    int k = (n-1) / BLOCK + 1;
    int x = k;
    int y = 1;
    if(x > 65535){
        x = ceil(sqrt(k));
        y = (n-1)/(x*BLOCK) + 1;
    }
    dim3 d(x, y, 1);
    //printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
    return d;
}

__global__ void top_pooling_forward_kernel(int N, float const *x, int w, int h, int c, int batch, float *offset, float *forward_ind, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= N) return;
    int in_index = i;
    int in_w = i%w;
    i = i/w;
    int in_h = i%h;
    i = i/h;
    int in_c = i%c;
    i = i/c;
    int b = i%batch;

    int sp = in_w + w*(in_h + h*b);
    sp = in_h+int(offset[sp]);
    if(sp > h) sp = h;

    int max_ind = in_index;
    int out_index = 0;
    for(int ind=in_h;ind<sp;++ind)
    {
        out_index = in_w + w*(ind + h*(in_c + c*b));
        if(x[max_ind] < x[out_index])
            max_ind = out_index;
    }
    out[in_index] = x[max_ind];
    forward_ind[in_index] = max_ind;
}

__global__ void top_pooling_backward_kernel(int N, float const *x, int w, int h, int c, int batch, float *backward_ind, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= N) return;
    int max_ind = backward_ind[i];
    atomicAdd(&out[max_ind],x[i]);
}

void top_pooling_forward_ongpu(float *x, int w, int h, int c, int batch, float *offset, float *forward_ind, float *out)
{
    int size = w*h*c*batch;
    cudaError_t err;

    top_pooling_forward_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, offset, forward_ind, out);

    err = cudaGetLastError();
    if(cudaSuccess != err)
    {
        fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
        exit( -1 );
    }
}

void top_pooling_backward_ongpu(float *x, int w, int h, int c, int batch, float *backward_ind, float *out)
{
    int size = w*h*c*batch;
    cudaError_t err;

    top_pooling_backward_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, backward_ind, out);

    err = cudaGetLastError();
    if(cudaSuccess != err)
    {
        fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
        exit( -1 );
    }
}

#ifdef __cplusplus
}
#endif
a30fab9601c7480f5b01a83f0235ece7a30c5ee8.hip
// !!! This is a file automatically generated by hipify!!! // Global includes #include <bitset> #include <memory> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> // Local includes #include "Multiply.h" #include "GPU/spECKKernels.h" #include "GPU/consistent_gpu_memory.h" #include "CUDATools/stream.h" #include "meta_utils.h" #include "GPU/spECK_HashSpGEMM.cuh" #include "GPU/spECK_HashLoadBalancer.cuh" #include "GPU/HelperFunctions.cuh" #include <thrust/scan.h> #include <thrust/extrema.h> #include "Config.h" #include "common.h" #include "WorkDistribution.h" #include "HashMap.cuh" #include "spECKConfig.h" using IndexType = uint32_t; namespace spECK { template <typename T> __host__ __forceinline__ T divup(T a, T b) { return (a + b - 1) / b; } void startTimerVar(hipEvent_t &start, hipStream_t stream = 0) { HANDLE_ERROR(hipEventRecord(start, stream)); HANDLE_ERROR(hipEventSynchronize(start)); } float recordTimerVar(hipEvent_t &start, hipEvent_t &end, hipStream_t stream = 0) { float time; HANDLE_ERROR(hipEventRecord(end, stream)); HANDLE_ERROR(hipEventSynchronize(end)); HANDLE_ERROR(hipEventElapsedTime(&time, start, end)); return time; } template <typename DataType, int BLOCKS_PER_SM, int THREADS_PER_BLOCK, int MAX_DYNAMIC_SHARED, int MAX_STATIC_SHARED> void MultiplyspECKImplementation(const dCSR<DataType> &matA_Dealloc, const dCSR<DataType> &matB_Dealloc, dCSR<DataType> &matOut, spECKConfig &config, Timings &timings) { // those matrices automatically deallocate memory when used as param for cuda -> therefore i have written a new struct without deallocs dCSRNoDealloc<DataType> matA(matA_Dealloc), matB(matB_Dealloc); if (matB.cols > 1 << 27) { printf("ERROR: matrix B has more than %d columns (%lu)\n", 1 << 27, matB.cols); return; } if (matA.rows > 1 << 27) { printf("ERROR: matrix A has more than %d rows (%lu)\n", 1 << 27, matB.rows); return; } if (matA.nnz * matB.nnz == 0) { matOut.nnz = 0; return; } if (MAX_DYNAMIC_SHARED != config.maxDynamicSharedMemoryPerBlock || MAX_STATIC_SHARED != config.maxStaticSharedMemoryPerBlock) { if (MAX_DYNAMIC_SHARED > config.maxDynamicSharedMemoryPerBlock) { printf("ERROR: spECK was compiled with %d maximum dynamic shared memory, but device limit is %d. Please recompile with correct amount set in Multiply.h line 10: spECK_DYNAMIC_MEM_PER_BLOCK\n", MAX_DYNAMIC_SHARED, config.maxDynamicSharedMemoryPerBlock); return; } else { printf("WARNING: spECK was compiled with %d maximum dynamic shared memory, but device limit is %d. Please recompile with correct amount set in Multiply.h line 10: spECK_DYNAMIC_MEM_PER_BLOCK\n", MAX_DYNAMIC_SHARED, config.maxDynamicSharedMemoryPerBlock); } if (MAX_STATIC_SHARED > MAX_DYNAMIC_SHARED) { printf("ERROR: spECK was compiled with smaller dynamic than static shared memory. (%d maximum static shared memory and %d maximum dynamic shared memory). Please check values in Multiply.h line 9 and 10", MAX_STATIC_SHARED, MAX_DYNAMIC_SHARED); return; } if (MAX_STATIC_SHARED > config.maxStaticSharedMemoryPerBlock) { printf("ERROR: spECK was compiled with %d maximum static shared memory, but device limit is %d. Please recompile with correct amount set in Multiply.h line 9: spECK_STATIC_MEM_PER_BLOCK\n", MAX_STATIC_SHARED, config.maxStaticSharedMemoryPerBlock); return; } else if (MAX_STATIC_SHARED < config.maxStaticSharedMemoryPerBlock) { printf("WARNING: spECK was compiled with %d maximum static shared memory, but device limit is %d. 
Please recompile with correct amount set in Multiply.h line 9: spECK_STATIC_MEM_PER_BLOCK\n", MAX_STATIC_SHARED, config.maxStaticSharedMemoryPerBlock); } } // ------------------------------------------------------------------------------------------------------------------------------------------- // Constants and configs // ------------------------------------------------------------------------------------------------------------------------------------------- spECKKernels spgemm(1024); const int kernelCountNumeric = 6; const int kernelCountCounting = 6; const int maxRowsPerBlock = 32; // this value may never exceed 32 because of some warp-optimizations const int warpsCounting = THREADS_PER_BLOCK / 32; const int warpsNumeric = THREADS_PER_BLOCK / 32; const int staticSharedMemPerBlockCounting = 48, staticSharedMemPerBlockNumeric = 24; const int sharedBytesPerWarpCounting = MAX_STATIC_SHARED / warpsCounting - staticSharedMemPerBlockCounting; // 48 byte is the maximum static shared memory per block const int entriesPerWarpCounting = sharedBytesPerWarpCounting / sizeof(IndexType); const int sharedBytesPerBlockCounting = sharedBytesPerWarpCounting * warpsCounting; // CC version > 7.0 support dynamic shared memory larger than static shared const int dynamicSharedBytesPerWarpCounting = MAX_DYNAMIC_SHARED / warpsCounting - staticSharedMemPerBlockCounting; // 48 byte is the maximum static shared memory per block const int dynamicEntriesPerWarpCounting = dynamicSharedBytesPerWarpCounting / sizeof(IndexType); const int dynamicSharedBytesPerBlockCounting = dynamicSharedBytesPerWarpCounting * warpsCounting; const int sharedBytesPerWarpNumeric = MAX_STATIC_SHARED / warpsNumeric - staticSharedMemPerBlockNumeric; // 24 byte is the maximum static shared memory per block const int entriesPerWarpNumeric = sharedBytesPerWarpNumeric / (sizeof(IndexType) + sizeof(DataType)); const int sharedBytesPerBlockNumeric = sharedBytesPerWarpNumeric * warpsNumeric; // CC version > 7.0 support dynamic shared memory larger than static shared const int dynamicSharedBytesPerWarpNumeric = MAX_DYNAMIC_SHARED / warpsNumeric - staticSharedMemPerBlockNumeric; // 24 byte is the maximum static shared memory per block const int dynamicEntriesPerWarpNumeric = dynamicSharedBytesPerWarpNumeric / (sizeof(IndexType) + sizeof(DataType)); const int dynamicSharedBytesPerBlockNumeric = dynamicSharedBytesPerWarpNumeric * warpsNumeric; assert(kernelCountCounting <= kernelCountNumeric); bool supportGlobalFallback = true; const uint32_t minimumDensityForDenseModeCounting = 999; const uint32_t denseModeRowThresholdInternalSorting = 999; const uint32_t denseModeRowThresholdExternalSorting = 18; const uint32_t sm = config.sm; const uint32_t cudaCores = config.sm * BLOCKS_PER_SM * 32; // ------------------------------------------------------------------------------------------------------------------------------------------- // INITIAL MALLOCS // ------------------------------------------------------------------------------------------------------------------------------------------- int estimatedAvgComPerRow = max(1, int((matA.nnz / matA.rows) * (matB.nnz / matB.rows))); // determine how many nnz of matC should be calculated by one block. 
avoid hashmaps running full int maxNnzPerBlockNumeric = entriesPerWarpNumeric * warpsNumeric * 2 / 3; int maxNnzPerBlockNumericDynamicSharedMem = dynamicEntriesPerWarpNumeric * warpsNumeric * 2 / 3; // CUDA variables hipStream_t stream = config.streams[0]; auto &streams = config.streams; if (timings.measureCompleteTime) startTimerVar(config.completeStart, stream); if (timings.measureAll) startTimerVar(config.individualStart, stream); // Allocate memory for offsets CU::unique_ptr newmat_offsets; if (matOut.rows != matA.rows) { newmat_offsets = CU::allocMemory((matA.rows + 1) * sizeof(IndexType)); } else if (matOut.row_offsets != nullptr) { newmat_offsets.consume(reinterpret_cast<hipDeviceptr_t>(matOut.row_offsets)); matOut.row_offsets = nullptr; } dCSRNoDealloc<DataType> matC; matC.row_offsets = newmat_offsets.get<IndexType>(); matC.cols = matB.cols; matC.rows = matA.rows; IndexType *blockStartRowsScale = nullptr; IndexType *blockCounterScale = nullptr; IndexType h_blockCounterScaleNumeric[kernelCountNumeric] = {0}; IndexType h_blockCounterScaleCounting[kernelCountCounting] = {0}; size_t cubTempBytesScan = 0; size_t cubTmpBytesReduce = 0; size_t cubTmpBytesActual = 0; void *cubTmp = nullptr; { hipcub::DeviceScan::ExclusiveSum(cubTmp, cubTempBytesScan, matC.row_offsets, matC.row_offsets, matC.rows + 1); hipcub::DeviceReduce::Sum(cubTmp, cubTmpBytesReduce, matC.row_offsets, matC.row_offsets, matC.rows); cubTmpBytesReduce = ::max(cubTempBytesScan, cubTmpBytesReduce); } // ---------------------------------------------------------------------------------- uint32_t maxComputationsPerRow = 0; uint32_t longestRowALength = 0; IndexType *d_blockStartRows = nullptr; uint32_t *d_blockCounter = nullptr; uint32_t *d_rowOperations = nullptr; uint32_t *d_rowMaxOperations = nullptr; uint32_t *d_maxElementsPerRow = nullptr; uint32_t *d_sumProducts = nullptr; uint32_t *d_rowColMinMax = nullptr; uint32_t *d_maxComputationsPerRow = nullptr; uint32_t *d_combined_pointers; size_t d_combined_pointers_size = sizeof(uint32_t) * (4 + 2 * matA.rows) + divup(cubTempBytesScan, sizeof(uint32_t)) * sizeof(uint32_t); if (matA.nnz > 10000) d_combined_pointers_size += sizeof(uint32_t) * matA.rows; HANDLE_ERROR(hipMalloc(&d_combined_pointers, d_combined_pointers_size)); HANDLE_ERROR(hipMemsetAsync(d_combined_pointers, 0, d_combined_pointers_size)); d_maxElementsPerRow = d_combined_pointers; /* keep this order */ d_sumProducts = &d_maxElementsPerRow[1]; d_maxComputationsPerRow = &d_sumProducts[1]; /* until here */ d_blockCounter = &d_maxComputationsPerRow[1]; d_rowOperations = &d_blockCounter[1]; d_rowMaxOperations = &d_rowOperations[matA.rows]; cubTmp = (void *)&d_rowMaxOperations[matA.rows]; cubTmpBytesActual = cubTempBytesScan; if (matA.nnz > 10000) { d_rowColMinMax = (uint32_t *)cubTmp; d_rowColMinMax = &d_rowColMinMax[divup(cubTempBytesScan, sizeof(uint32_t))]; } if (timings.measureAll) { timings.init = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // COUNT COMPUTATIONS // ------------------------------------------------------------------------------------------------------------------------------------------- uint32_t sumProducts = 0; // calc amount of operations per row { const uint32_t threadsPerBlock = 128U; // limit to threadsPerBlock rows! 
// -> and always try to stay slightly below the threads per block size, because if you are slightly above, it is way more expensive than being far below uint32_t rowsPerBlock = ::min(threadsPerBlock, ::max(1U, (threadsPerBlock - 8) / ::max(1U, uint32_t(matA.nnz / matA.rows)))); rowsPerBlock = ::max(1U, ::min(rowsPerBlock, uint32_t(matA.rows) / (4U * cudaCores / threadsPerBlock))); hipLaunchKernelGGL(( readOperations<IndexType, DataType, IndexType, threadsPerBlock>), dim3(divup(uint32_t(matA.rows), rowsPerBlock)), dim3(threadsPerBlock), 0, 0, matA, matB, d_rowOperations, rowsPerBlock, d_maxComputationsPerRow, d_rowColMinMax, d_rowMaxOperations, d_sumProducts); // copying both values at once gives a huge performance boost uint32_t tmpArr[2]; HANDLE_ERROR(hipMemcpy(&tmpArr, d_sumProducts, sizeof(uint32_t) * 2, hipMemcpyDeviceToHost)); sumProducts = tmpArr[0]; maxComputationsPerRow = tmpArr[1]; // sumProducts = max(sumProducts, 1); } if (sumProducts == 0) { if (timings.measureCompleteTime) timings.complete = recordTimerVar(config.completeStart, config.completeEnd); matOut.alloc(matA.rows, matB.cols, 0, false); return; } int maxNnzPerBlockCounting = entriesPerWarpCounting * warpsCounting * 4 / 5; int maxNnzPerBlockCountingDynamicSharedMem = dynamicEntriesPerWarpCounting * warpsCounting * 4 / 5; // you always know the maximum size of the output row uint32_t maxRowLength = max(1, min((uint32_t)matB.cols * 12 / 10, maxComputationsPerRow)); if (timings.measureAll) { timings.countProducts = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // LOADBALANCE COUNTING // ------------------------------------------------------------------------------------------------------------------------------------------- uint32_t h_blockCounter = 0; uint32_t rowsPerBlock = 1; if (kernelCountCounting > 5 && maxRowLength < (maxNnzPerBlockCounting >> 4)) { uint32_t maxRowsPerBlockUtilization = max(1, min(uint32_t(maxRowsPerBlock), uint32_t(matA.rows / (sm * BLOCKS_PER_SM << (kernelCountCounting - 2))))); if (maxRowLength < maxNnzPerBlockCounting >> (kernelCountCounting - 1)) { if (estimatedAvgComPerRow / maxRowLength == 1 || maxRowLength / estimatedAvgComPerRow == 1) rowsPerBlock = min(maxRowsPerBlockUtilization, ((maxNnzPerBlockCounting >> (kernelCountCounting - 1)) / 3) / maxRowLength); else rowsPerBlock = min(maxRowsPerBlockUtilization, (maxNnzPerBlockCounting >> kernelCountCounting) / maxRowLength); } rowsPerBlock = max(rowsPerBlock, 1); h_blockCounterScaleCounting[kernelCountCounting - 1] = divup(uint32_t(matA.rows), rowsPerBlock); } else if (kernelCountCounting > 4 && maxRowLength < (maxNnzPerBlockCounting >> 3)) h_blockCounterScaleCounting[4] = matA.rows; else if (kernelCountCounting > 3 && maxRowLength < (maxNnzPerBlockCounting >> 2)) h_blockCounterScaleCounting[3] = matA.rows; else if (kernelCountCounting > 2 && maxRowLength < (maxNnzPerBlockCounting >> 1)) h_blockCounterScaleCounting[2] = matA.rows; else if (kernelCountCounting > 1 && maxRowLength < (maxNnzPerBlockCounting >> 0)) h_blockCounterScaleCounting[1] = matA.rows; else h_blockCounterScaleCounting[0] = matA.rows; uint32_t rowsRequiringGlobal = h_blockCounterScaleCounting[0]; uint32_t actualKernelCount = min(kernelCountCounting, uint32_t( std::log2( divup( int(maxRowLength), min( maxNnzPerBlockCounting >> (kernelCountCounting - 1), 
maxNnzPerBlockNumeric >> (kernelCountNumeric - 1)))) + 1)); bool useLoadBalancingCounting = false; // TODO check if && maxComputationsPerRow > maxNnzPerBlockCounting / 8 can be removed if (matA.nnz > 771843 || maxComputationsPerRow < maxNnzPerBlockCountingDynamicSharedMem && maxComputationsPerRow > (maxNnzPerBlockCounting >> 2) && matA.rows > 7575 || maxComputationsPerRow > maxNnzPerBlockCountingDynamicSharedMem && sumProducts > 1940177 || maxComputationsPerRow / max(1, int((sumProducts / matA.rows))) > 110 && sumProducts > 1164708) useLoadBalancingCounting = true; if (useLoadBalancingCounting) { size_t combinedBlockStartSize = sizeof(IndexType) * (1 + kernelCountCounting + matA.rows * (1 + actualKernelCount)); HANDLE_ERROR(hipMalloc(&d_blockStartRows, combinedBlockStartSize)); blockStartRowsScale = &d_blockStartRows[matA.rows + 1]; blockCounterScale = &blockStartRowsScale[actualKernelCount * matA.rows]; HANDLE_ERROR(hipMemset(blockCounterScale, 0, sizeof(IndexType) * kernelCountCounting)); // load balance over amount of operations per row in A spgemm.h_AssignHashSpGEMMBlocksToRowsOfSameSizeOperations<uint32_t, DataType, uint8_t, kernelCountCounting>( matA, matB, d_rowOperations, blockStartRowsScale, blockCounterScale, h_blockCounterScaleCounting, d_blockStartRows, maxNnzPerBlockCounting, maxNnzPerBlockCountingDynamicSharedMem, maxRowsPerBlock, actualKernelCount, rowsRequiringGlobal); } else { h_blockCounter = matA.rows; d_blockStartRows = nullptr; } if (timings.measureAll) { timings.loadBalanceCounting = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // ALLOCATE GLOBAL MAPS // ------------------------------------------------------------------------------------------------------------------------------------------- int elementsPerMap = (::max(maxRowLength, uint32_t(maxNnzPerBlockCountingDynamicSharedMem)) * 5) / 4; supportGlobalFallback &= maxRowLength > entriesPerWarpCounting * warpsCounting; typedef HashMap<uint32_t, DataType> GlobalMap; typedef HashMapNoValue<uint32_t, 1> GlobalMapRowOffsets; typedef HashMapNoValue<uint32_t, maxRowsPerBlock> GlobalMapNoValue; void *hashMaps = nullptr; IndexType *maps_indices = nullptr; DataType *maps_values = nullptr; uint32_t hashMapCount = 0; size_t globalMapMaxSize; globalMapMaxSize = ::max(sizeof(GlobalMap), sizeof(GlobalMapNoValue)); globalMapMaxSize = ::max(globalMapMaxSize, sizeof(GlobalMapRowOffsets)); if (supportGlobalFallback) { hashMapCount = ::min(sm * BLOCKS_PER_SM, h_blockCounterScaleCounting[0]); hashMapCount = ::min(hashMapCount, rowsRequiringGlobal); supportGlobalFallback &= hashMapCount > 0; } rowsRequiringGlobal = matB.cols < entriesPerWarpCounting * warpsCounting ? 
0 : rowsRequiringGlobal; bool isDenseCounting = useLoadBalancingCounting && rowsRequiringGlobal > 0 && maxComputationsPerRow > maxNnzPerBlockCountingDynamicSharedMem * 2; if (isDenseCounting) { supportGlobalFallback = false; // every bit is one column if (matB.cols > (warpsCounting * sharedBytesPerWarpCounting * 8) / 2) { if (longestRowALength == 0) { uint32_t *d_longestRowALength = nullptr; HANDLE_ERROR(hipMalloc(&d_longestRowALength, sizeof(uint32_t))); HANDLE_ERROR(hipMemset(d_longestRowALength, 0, sizeof(uint32_t))); const uint32_t blockdim = 256; const uint32_t rowsPerThread = 2; const uint32_t blocks = divup(IndexType(matA.rows), blockdim * rowsPerThread); hipLaunchKernelGGL(( getLongestRowA<IndexType, blockdim, rowsPerThread>), dim3(blocks), dim3(blockdim), 0, 0, matA.row_offsets, d_longestRowALength, matA.rows, matA.nnz); hipMemcpy(&longestRowALength, d_longestRowALength, sizeof(uint32_t), hipMemcpyDeviceToHost); } // only use global maps if the row cursors can't be held in shared memory if (elementsPerMap * 2 > warpsCounting * entriesPerWarpCounting) { hashMapCount = sm * BLOCKS_PER_SM; elementsPerMap = longestRowALength * 5 / 4; if (maps_indices != nullptr) HANDLE_ERROR(hipFree(maps_indices)); if (hashMaps != nullptr) HANDLE_ERROR(hipFree(hashMaps)); HANDLE_ERROR(hipMalloc(&maps_indices, sizeof(uint32_t) * hashMapCount * (elementsPerMap + maxRowsPerBlock + 1))); HANDLE_ERROR(hipMalloc(&hashMaps, globalMapMaxSize * hashMapCount)); spgemm.setLaunchDimensions(hashMapCount, streams[0], 32 * warpsNumeric); spgemm.h_InitializeGlobalMapsNoVal<GlobalMapRowOffsets, uint32_t>((GlobalMapRowOffsets *)hashMaps, hashMapCount, maps_indices, elementsPerMap, maxRowsPerBlock); } } } if (supportGlobalFallback) { HANDLE_ERROR(hipMalloc(&hashMaps, globalMapMaxSize * hashMapCount)); HANDLE_ERROR(hipMalloc(&maps_indices, sizeof(IndexType) * hashMapCount * (elementsPerMap + maxRowsPerBlock + 1))); spgemm.setLaunchDimensions(hashMapCount, streams[0], 32 * warpsCounting); spgemm.h_InitializeGlobalMapsNoVal<GlobalMapNoValue, IndexType>((GlobalMapNoValue *)hashMaps, hashMapCount, maps_indices, elementsPerMap, maxRowsPerBlock); } if (timings.measureAll) { timings.globalMapsCounting = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // PRE-COUNTING LOAD-OPTIMIZATION // ------------------------------------------------------------------------------------------------------------------------------------------- IndexType blockPrefixScaled[kernelCountCounting] = {0}; { uint32_t activeSM = h_blockCounterScaleCounting[0]; // never go up to top level int firstXEmpty = h_blockCounterScaleCounting[0] == 0; bool foundFirstNonEmpty = h_blockCounterScaleCounting[0] != 0; for (int i = 1; i < kernelCountCounting; ++i) { blockPrefixScaled[i] = h_blockCounterScaleCounting[i - 1] + blockPrefixScaled[i - 1]; activeSM += 2 * h_blockCounterScaleCounting[i] >> (i - 1); if (!foundFirstNonEmpty) { if (h_blockCounterScaleCounting[i] == 0) firstXEmpty++; else foundFirstNonEmpty = true; } } // avoid div by zero activeSM = max(activeSM, 1); if (activeSM < sm * BLOCKS_PER_SM) { int shiftUp = min(firstXEmpty, int(std::log2(sm * BLOCKS_PER_SM / activeSM))); if (shiftUp > 0) { for (int i = 0; i < kernelCountCounting; i++) { if (i + shiftUp < kernelCountCounting) { h_blockCounterScaleCounting[i] = h_blockCounterScaleCounting[i + 
shiftUp]; blockPrefixScaled[i] = blockPrefixScaled[i + shiftUp]; } else { h_blockCounterScaleCounting[i] = 0; blockPrefixScaled[i] = h_blockCounter; } } } } } // ------------------------------------------------------------------------------------------------------------------------------------------- // COUNT NNZ PER ROW OF C // ------------------------------------------------------------------------------------------------------------------------------------------- { if (h_blockCounterScaleCounting[0] > 0) { if (isDenseCounting) { // this only uses 1 block per sm and therefore hash 50% occupancy, but better caching spgemm.setLaunchDimensions(h_blockCounterScaleCounting[0], streams[0], (32 * warpsCounting >> 0), dynamicSharedBytesPerBlockCounting); spgemm.h_DenseSpGEMMCount<IndexType, DataType, GlobalMapRowOffsets, dynamicSharedBytesPerBlockCounting, true, (32 * warpsCounting >> 0)>( matA, matB, (GlobalMapRowOffsets *)hashMaps, hashMapCount, matC.row_offsets, d_blockStartRows + blockPrefixScaled[0], d_rowOperations, h_blockCounterScaleCounting[0], d_rowColMinMax, d_rowMaxOperations, d_maxElementsPerRow, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[0], streams[0], 32 * warpsCounting >> 0, dynamicSharedBytesPerBlockCounting); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, dynamicSharedBytesPerBlockCounting, true, (32 * warpsCounting >> 0)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[0], h_blockCounterScaleCounting[0], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } } if (kernelCountCounting > 1 && h_blockCounterScaleCounting[1] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[1], streams[1], 32 * warpsCounting >> 0, sharedBytesPerBlockCounting >> 0); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 0), false, (32 * warpsCounting >> 0)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[1], h_blockCounterScaleCounting[1], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } if (kernelCountCounting > 2 && h_blockCounterScaleCounting[2] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[2], streams[2], (32 * warpsCounting >> 1), sharedBytesPerBlockCounting >> 1); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 1), false, (32 * warpsCounting >> 1)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[2], h_blockCounterScaleCounting[2], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } if (kernelCountCounting > 3 && h_blockCounterScaleCounting[3] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[3], streams[3], (32 * warpsCounting >> 2), sharedBytesPerBlockCounting >> 2); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 2), false, (32 * warpsCounting >> 2)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, 
d_blockStartRows + blockPrefixScaled[3], h_blockCounterScaleCounting[3], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } if (kernelCountCounting > 4 && h_blockCounterScaleCounting[4] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[4], streams[4], 32 * warpsCounting >> 3, sharedBytesPerBlockCounting >> 3); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 3), false, (32 * warpsCounting >> 3)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[4], h_blockCounterScaleCounting[4], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } if (kernelCountCounting > 5 && h_blockCounterScaleCounting[5] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[5], streams[5], 32 * warpsCounting >> 4, sharedBytesPerBlockCounting >> 4); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 4), false, (32 * warpsCounting >> 4)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[5], h_blockCounterScaleCounting[5], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } } // ------------------------------------------------------------------------------------------------------------------------------------------- // SCAN ROW OFFSETS AND GET NNZ OF C // ------------------------------------------------------------------------------------------------------------------------------------------- // now we need to allocate that memory for prefix scan and for finding the longest row if (cubTmpBytesActual < cubTempBytesScan) { cubTmpBytesActual = cubTempBytesScan; if (cubTmp != nullptr) HANDLE_ERROR(hipFree(cubTmp)); HANDLE_ERROR(hipMalloc(&cubTmp, cubTmpBytesActual)); } // prefix sum to get the starting ids of each row of mat C hipcub::DeviceScan::ExclusiveSum(cubTmp, cubTmpBytesActual, matC.row_offsets, matC.row_offsets, matC.rows + 1); { IndexType nnz; hipMemcpy(&nnz, matC.row_offsets + matC.rows, sizeof(IndexType), hipMemcpyDeviceToHost); matC.nnz = nnz; } if (timings.measureAll) { HANDLE_ERROR(hipDeviceSynchronize()); timings.spGEMMCounting = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // ALLOCATE OUTPUT MATRIX C // ------------------------------------------------------------------------------------------------------------------------------------------- // only allocate mem for mat C if size is not correct if (matOut.nnz != matC.nnz) { matOut.alloc(matC.rows, matC.cols, matC.nnz, false); } if (matOut.data == nullptr || matOut.col_ids == nullptr) { if (matOut.nnz > 0) printf("ERROR: out of memory\n"); return; } matOut.row_offsets = std::move(newmat_offsets.getRelease<IndexType>()); matC = dCSRNoDealloc<DataType>(matOut); if (timings.measureAll) { timings.allocC = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // 
------------------------------------------------------------------------------------------------------------------------------------------- // LOAD BALANCE NUMERIC // ------------------------------------------------------------------------------------------------------------------------------------------- uint32_t maxElementsPerRow = maxRowLength; hipMemcpy(&maxElementsPerRow, d_maxElementsPerRow, sizeof(uint32_t), hipMemcpyDeviceToHost); bool reprocessLoadBalanceNumeric = useLoadBalancingCounting; rowsPerBlock = 1; // get the longest row in order to minimize the global map size which needs to be allocated if (kernelCountNumeric > 5 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 4)) { uint32_t maxRowsPerBlockUtilization = max(1, min(uint32_t(maxRowsPerBlock), uint32_t(matA.rows / (sm * BLOCKS_PER_SM << (kernelCountNumeric - 2))))); if (maxElementsPerRow<(entriesPerWarpNumeric * warpsNumeric)>> kernelCountNumeric) { if (maxElementsPerRow / max(1U, uint32_t(matC.nnz / matC.rows)) == 1) rowsPerBlock = min(maxRowsPerBlockUtilization, (maxNnzPerBlockNumeric >> (kernelCountNumeric - 1)) / maxElementsPerRow); else rowsPerBlock = min(maxRowsPerBlockUtilization, (entriesPerWarpNumeric * warpsNumeric >> (kernelCountNumeric - 1)) / maxElementsPerRow); } rowsPerBlock = max(rowsPerBlock, 1); h_blockCounterScaleNumeric[kernelCountNumeric - 1] = divup(uint32_t(matA.rows), rowsPerBlock); } else if (kernelCountNumeric > 4 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 3)) h_blockCounterScaleNumeric[4] = matC.rows; else if (kernelCountNumeric > 3 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 2)) h_blockCounterScaleNumeric[3] = matC.rows; else if (kernelCountNumeric > 2 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 1)) h_blockCounterScaleNumeric[2] = matC.rows; else if (kernelCountNumeric > 1 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 0)) h_blockCounterScaleNumeric[1] = matC.rows; else h_blockCounterScaleNumeric[0] = matC.rows; supportGlobalFallback = true; supportGlobalFallback &= maxElementsPerRow >= maxNnzPerBlockNumericDynamicSharedMem; rowsRequiringGlobal = h_blockCounterScaleNumeric[0]; uint32_t avgElementsPerRow = max(1, int(matC.nnz / matC.rows)); uint32_t maxAvgElementsPerRowRatio = maxElementsPerRow / avgElementsPerRow; reprocessLoadBalanceNumeric = false; if (maxElementsPerRow > (maxNnzPerBlockNumeric >> 2) && matA.rows >= 1236 && sumProducts > 636293 || maxElementsPerRow > (maxNnzPerBlockNumeric >> (kernelCountNumeric - 1)) && ( maxAvgElementsPerRowRatio > 4 && sumProducts > 4921876 || maxAvgElementsPerRowRatio > 13 && sumProducts > 385847 || maxAvgElementsPerRowRatio > 18 && sumProducts > 26263 && avgElementsPerRow > 22 || maxAvgElementsPerRowRatio > 146)) reprocessLoadBalanceNumeric = true; // can bring a performance benefit for some matrices, but has small overhead if (reprocessLoadBalanceNumeric && matC.nnz > 0) { if (d_blockCounter == nullptr) { HANDLE_ERROR(hipMalloc(&d_blockCounter, sizeof(uint32_t))); } if (blockCounterScale == nullptr) { size_t combinedBlockStartSize = sizeof(IndexType) * (1 + kernelCountNumeric + matA.rows * (1 + actualKernelCount)); HANDLE_ERROR(hipMalloc(&d_blockStartRows, combinedBlockStartSize)); blockStartRowsScale = &d_blockStartRows[matA.rows + 1]; blockCounterScale = &blockStartRowsScale[actualKernelCount * matA.rows]; } // reset buffers HANDLE_ERROR(hipMemsetAsync(d_blockCounter, 0, sizeof(uint32_t))); HANDLE_ERROR(hipMemsetAsync(blockCounterScale, 0, sizeof(IndexType) * kernelCountNumeric)); 
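        // Illustrative sketch (comments only, not part of spECK): the launcher call below
        // re-buckets the rows of C, now that their exact nnz is known, into kernelCountNumeric
        // size classes so that each block's shared-memory hash map still fits the densest row it
        // receives. Assuming a hypothetical per-row length `len`, the per-row rule is in the
        // spirit of the whole-matrix cascade used above (each class halves the budget):
        //
        //   int bucket = 0;                                   // 0 == too large, dense/global path
        //   for (int k = kernelCountNumeric - 1; k > 0; --k)
        //       if (len < (maxNnzPerBlockNumeric >> (k - 1))) { bucket = k; break; }
        //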
spgemm.h_AssignHashSpGEMMBlocksToRowsOfSameSize<IndexType, DataType, uint8_t, kernelCountNumeric>( matC, blockStartRowsScale, d_blockStartRows, blockCounterScale, h_blockCounterScaleNumeric, maxNnzPerBlockNumeric, maxNnzPerBlockNumericDynamicSharedMem, maxRowsPerBlock, actualKernelCount, rowsRequiringGlobal); } else { HANDLE_ERROR(hipFree(d_blockStartRows)); d_blockStartRows = nullptr; } if (timings.measureAll) { timings.loadBalanceNumeric = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // ALLOCATE GLOBAL MAPS // ------------------------------------------------------------------------------------------------------------------------------------------- // always disabled since we always use dense mode for large rows supportGlobalFallback = false; if (supportGlobalFallback) { // update elements per map now that we know the lengths of each row --> could save some global memory and therefore allocation time elementsPerMap = max(maxElementsPerRow, maxNnzPerBlockNumericDynamicSharedMem) * 3 / 2; supportGlobalFallback &= h_blockCounterScaleNumeric[0] > 0; hashMapCount = min(sm * BLOCKS_PER_SM, h_blockCounterScaleNumeric[0]); hashMapCount = min(hashMapCount, rowsRequiringGlobal); supportGlobalFallback &= hashMapCount > 0; } rowsRequiringGlobal = matB.cols < entriesPerWarpNumeric * warpsNumeric ? 0 : rowsRequiringGlobal; bool isDenseOutput = h_blockCounterScaleNumeric[0] > 0; GlobalMapRowOffsets *rowOffsetMaps = nullptr; IndexType *rowOffsetMapIndices = nullptr; uint32_t rowOffsetMapCount = 0; uint32_t rowOffsetMapElementsPer = 0; if (isDenseOutput) { if (longestRowALength == 0) { uint32_t *d_longestRowALength = nullptr; HANDLE_ERROR(hipMalloc(&d_longestRowALength, sizeof(uint32_t))); HANDLE_ERROR(hipMemset(d_longestRowALength, 0, sizeof(uint32_t))); const uint32_t _threads = 256; const uint32_t rowsPerThread = 2; const uint32_t blocks = divup(IndexType(matA.rows), _threads * rowsPerThread); hipLaunchKernelGGL(( getLongestRowA<IndexType, _threads, rowsPerThread>), dim3(blocks), dim3(_threads), 0, 0, matA.row_offsets, d_longestRowALength, matA.rows, matA.nnz); hipMemcpy(&longestRowALength, d_longestRowALength, sizeof(uint32_t), hipMemcpyDeviceToHost); } rowOffsetMapElementsPer = longestRowALength; rowOffsetMapCount = min(h_blockCounterScaleNumeric[0], sm * BLOCKS_PER_SM); // only allocate global maps if row cursors can't be held in share memory if (elementsPerMap * 2 * sizeof(IndexType) > warpsNumeric * entriesPerWarpNumeric * (sizeof(IndexType) + sizeof(DataType))) { if (h_blockCounterScaleNumeric[0] != 0) { if (rowOffsetMaps != nullptr) HANDLE_ERROR(hipFree(rowOffsetMaps)); HANDLE_ERROR(hipMalloc(&rowOffsetMaps, globalMapMaxSize * rowOffsetMapCount)); if (rowOffsetMapIndices != nullptr) { HANDLE_ERROR(hipFree(rowOffsetMapIndices)); rowOffsetMapIndices = nullptr; } if (rowOffsetMapIndices == nullptr) HANDLE_ERROR(hipMalloc(&rowOffsetMapIndices, sizeof(IndexType) * rowOffsetMapCount * (rowOffsetMapElementsPer + maxRowsPerBlock + 1))); spgemm.setLaunchDimensions(rowOffsetMapCount, stream, 32 * warpsNumeric); spgemm.h_InitializeGlobalMapsNoVal<GlobalMapRowOffsets, uint32_t>((GlobalMapRowOffsets *)rowOffsetMaps, rowOffsetMapCount, rowOffsetMapIndices, rowOffsetMapElementsPer, maxRowsPerBlock); } } } if (timings.measureAll) { timings.globalMapsNumeric = recordTimerVar(config.individualStart, 
config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // PRE-NUMERIC LOAD OPTIMIZATIONS // ------------------------------------------------------------------------------------------------------------------------------------------- // alloc indices for rows which shall be sorted by cub bool sortAllInplace = false; { { uint32_t activeSM = h_blockCounterScaleNumeric[0]; // never go up to top level int firstXEmpty = 0; bool foundFirstNonEmpty = h_blockCounterScaleNumeric[0] != 0; for (int i = 1; i < kernelCountNumeric; ++i) { blockPrefixScaled[i] = h_blockCounterScaleNumeric[i - 1] + blockPrefixScaled[i - 1]; activeSM += 2 * h_blockCounterScaleNumeric[i] >> (i - 1); if (!foundFirstNonEmpty) { if (h_blockCounterScaleNumeric[i] == 0) firstXEmpty++; else foundFirstNonEmpty = true; } } // avoid div by zero activeSM = max(activeSM, 1); if (activeSM < sm * BLOCKS_PER_SM) { int shiftUp = min(firstXEmpty, int(std::log2(sm * BLOCKS_PER_SM / activeSM))); if (shiftUp > 0) { if (firstXEmpty >= 2) sortAllInplace = true; for (int i = 0; i < kernelCountNumeric; i++) { if (i + shiftUp < kernelCountNumeric) { h_blockCounterScaleNumeric[i] = h_blockCounterScaleNumeric[i + shiftUp]; blockPrefixScaled[i] = blockPrefixScaled[i + shiftUp]; } else { h_blockCounterScaleNumeric[i] = 0; blockPrefixScaled[i] = h_blockCounter; } } } } } // inplace starts to be faster if the size of the maps is getting smaller Config::SortModes sortMode = Config::SortModes::CubSegmentedSort; const uint32_t entrySize = sizeof(IndexType) + sizeof(DataType); Config::SpGEMMMethods spGemmMethodNumeric = Config::AutoSpGEMM; // ------------------------------------------------------------------------------------------------------------------------------------------- // NUMERIC SPGEMM // ------------------------------------------------------------------------------------------------------------------------------------------- if (h_blockCounterScaleNumeric[0] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[0], streams[0], 32 * warpsNumeric, dynamicSharedBytesPerBlockNumeric); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, dynamicSharedBytesPerBlockNumeric, true, (32 * warpsNumeric)>( matA, matB, matC, (GlobalMapRowOffsets *)rowOffsetMaps, rowOffsetMapCount, d_blockStartRows, d_rowOperations, h_blockCounterScaleNumeric[0], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } sortMode = sortAllInplace ? Config::InPlace : Config::Separate; bool setSortingBit = sortAllInplace ? 
false : maxElementsPerRow >= 500; if (kernelCountNumeric > 1 && h_blockCounterScaleNumeric[1] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[1], streams[1], (32 * warpsNumeric >> 0), (sharedBytesPerBlockNumeric >> 0)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 0), false, (32 * warpsNumeric >> 0)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[1], d_rowOperations, sortMode, h_blockCounterScaleNumeric[1], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdExternalSorting, setSortingBit, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[1], streams[1], 32 * warpsNumeric >> 0, (sharedBytesPerBlockNumeric >> 0)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 0), false, (32 * warpsNumeric >> 0)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[1], d_rowOperations, h_blockCounterScaleNumeric[1], d_rowColMinMax, d_rowMaxOperations, setSortingBit, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[1], streams[1], 32 * warpsNumeric, (sharedBytesPerBlockNumeric >> 0)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 0), false, (32 * warpsNumeric)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[1], d_rowOperations, sortMode, h_blockCounterScaleNumeric[1], d_rowColMinMax, d_rowMaxOperations, setSortingBit, rowsPerBlock); } } if (kernelCountNumeric > 2 && h_blockCounterScaleNumeric[2] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[2], streams[2], (32 * warpsNumeric >> 1), (sharedBytesPerBlockNumeric >> 1)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 1), false, (32 * warpsNumeric >> 1)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[2], d_rowOperations, sortMode, h_blockCounterScaleNumeric[2], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdExternalSorting, setSortingBit, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[2], streams[2], 32 * warpsNumeric >> 1, (sharedBytesPerBlockNumeric >> 1)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 1), false, (32 * warpsNumeric >> 1)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[2], d_rowOperations, h_blockCounterScaleNumeric[2], d_rowColMinMax, d_rowMaxOperations, setSortingBit, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[2], streams[2], 32 * warpsNumeric >> 1, (sharedBytesPerBlockNumeric >> 1)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 1), false, (32 * warpsNumeric >> 1)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[2], d_rowOperations, sortMode, h_blockCounterScaleNumeric[2], d_rowColMinMax, d_rowMaxOperations, setSortingBit, rowsPerBlock); } } sortMode = Config::InPlace; if (kernelCountNumeric > 3 && 
h_blockCounterScaleNumeric[3] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[3], streams[3], (32 * warpsNumeric >> 2), (sharedBytesPerBlockNumeric >> 2)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 2), false, (32 * warpsNumeric >> 2)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[3], d_rowOperations, sortMode, h_blockCounterScaleNumeric[3], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdInternalSorting, false, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[3], streams[3], 32 * warpsNumeric >> 2, (sharedBytesPerBlockNumeric >> 2)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 2), false, (32 * warpsNumeric >> 2)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[3], d_rowOperations, h_blockCounterScaleNumeric[3], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[3], streams[3], 32 * warpsNumeric >> 2, (sharedBytesPerBlockNumeric >> 2)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 2), false, (32 * warpsNumeric >> 2)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[3], d_rowOperations, sortMode, h_blockCounterScaleNumeric[3], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } } if (kernelCountNumeric > 4 && h_blockCounterScaleNumeric[4] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[4], streams[4], (32 * warpsNumeric >> 3), (sharedBytesPerBlockNumeric >> 3)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 3), false, (32 * warpsNumeric >> 3)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[4], d_rowOperations, sortMode, h_blockCounterScaleNumeric[4], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdInternalSorting, false, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[4], streams[4], 32 * warpsNumeric >> 3, (sharedBytesPerBlockNumeric >> 3)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 3), false, (32 * warpsNumeric >> 3)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[4], d_rowOperations, h_blockCounterScaleNumeric[4], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[4], streams[4], 32 * warpsNumeric >> 3, (sharedBytesPerBlockNumeric >> 3)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 3), false, (32 * warpsNumeric >> 3)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[4], d_rowOperations, sortMode, h_blockCounterScaleNumeric[4], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } } if (kernelCountNumeric > 5 && h_blockCounterScaleNumeric[5] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM || ((rowsPerBlock > 1 || reprocessLoadBalanceNumeric) && 
spGemmMethodNumeric != Config::HashSpGEMM)) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[5], streams[5], (32 * warpsNumeric >> 4), (sharedBytesPerBlockNumeric >> 4)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 4), false, (32 * warpsNumeric >> 4)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[5], d_rowOperations, sortMode, h_blockCounterScaleNumeric[5], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdInternalSorting, false, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[5], streams[5], 32 * warpsNumeric >> 4, (sharedBytesPerBlockNumeric >> 4)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 4), false, (32 * warpsNumeric >> 4)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[5], d_rowOperations, h_blockCounterScaleNumeric[5], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[5], streams[5], 32 * warpsNumeric >> 4, (sharedBytesPerBlockNumeric >> 4)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 4), false, (32 * warpsNumeric >> 4)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[5], d_rowOperations, sortMode, h_blockCounterScaleNumeric[5], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } } } if (timings.measureAll) { HANDLE_ERROR(hipDeviceSynchronize()); timings.spGEMMNumeric = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // SORT MEDIUM AND LONG ROWS // ------------------------------------------------------------------------------------------------------------------------------------------- if (!sortAllInplace && (h_blockCounterScaleNumeric[1] + h_blockCounterScaleNumeric[2] > 0) && maxElementsPerRow >= 500) { if (h_blockCounterScaleNumeric[2] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[2], streams[2], 32 * warpsNumeric / 4); spgemm.h_HashSpGEMMSorting<uint32_t, DataType, 32 * warpsNumeric / 4, entriesPerWarpNumeric * 32 / 2>( matC, d_blockStartRows + blockPrefixScaled[2], h_blockCounterScaleNumeric[2], true); } if (h_blockCounterScaleNumeric[1] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[1], streams[1], 32 * warpsNumeric / 2); spgemm.h_HashSpGEMMSorting<uint32_t, DataType, 32 * warpsNumeric / 2, entriesPerWarpNumeric * 32>( matC, d_blockStartRows + blockPrefixScaled[1], h_blockCounterScaleNumeric[1], true); } } if (timings.measureAll) { HANDLE_ERROR(hipDeviceSynchronize()); timings.sorting = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // FREE ALLOCATED MEMORY // ------------------------------------------------------------------------------------------------------------------------------------------- if (d_blockStartRows != nullptr) HANDLE_ERROR(hipFree(d_blockStartRows)); if (hashMaps != nullptr) 
HANDLE_ERROR(hipFree(hashMaps)); if (maps_indices != nullptr) HANDLE_ERROR(hipFree(maps_indices)); if (maps_values != nullptr) HANDLE_ERROR(hipFree(maps_values)); if (d_combined_pointers != nullptr) HANDLE_ERROR(hipFree(d_combined_pointers)); if (rowOffsetMaps != nullptr) HANDLE_ERROR(hipFree(rowOffsetMaps)); if (rowOffsetMapIndices != nullptr) HANDLE_ERROR(hipFree(rowOffsetMapIndices)); if (timings.measureAll) { timings.cleanup = recordTimerVar(config.individualStart, config.individualEnd, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // END // ------------------------------------------------------------------------------------------------------------------------------------------- if (timings.measureCompleteTime) { HANDLE_ERROR(hipDeviceSynchronize()); timings.complete = recordTimerVar(config.completeStart, config.completeEnd, stream); } if (timings.measureAll) { /*printf("elements per global map=%d. mapCount=%d\n", elementsPerMap, hashMapCount); printf("matCNnz=%d, number of blocks = %d, %d, %d, %d, %d, %d\n", matC.nnz, h_blockCounterScaleNumeric[0], kernelCountNumeric > 1 ? h_blockCounterScaleNumeric[1] : -1, kernelCountNumeric > 2 ? h_blockCounterScaleNumeric[2] : -1, kernelCountNumeric > 3 ? h_blockCounterScaleNumeric[3] : -1, kernelCountNumeric > 4 ? h_blockCounterScaleNumeric[4] : -1, kernelCountNumeric > 5 ? h_blockCounterScaleNumeric[5] : -1);*/ if (timings.measureAll) { printf("spECK initial mallocs = %f ms\n", timings.init); printf("spECK count computations = %f ms\n", timings.countProducts); printf("spECK load-balancer = %f ms\n", timings.loadBalanceCounting); printf("spECK GlobalMaps Cnt = %f ms\n", timings.globalMapsCounting); printf("spECK counting kernel = %f ms\n", timings.spGEMMCounting); printf("spECK malloc mat C = %f ms\n", timings.allocC); printf("spECK num load-balancer = %f ms\n", timings.loadBalanceNumeric); printf("spECK init GlobalMaps = %f ms\n", timings.globalMapsNumeric); printf("spECK numeric kernel = %f ms\n", timings.spGEMMNumeric); printf("spECK Sorting kernel = %f ms\n", timings.sorting); printf("spECK cleanup = %f ms\n", timings.cleanup); printf("--------------------------------------------------------------\n"); } if (timings.measureCompleteTime) printf("spECK complete = %f ms\n\n", timings.complete); } matOut.rows = matC.rows; matOut.cols = matC.cols; matOut.nnz = matC.nnz; matOut.col_ids = matC.col_ids; matOut.row_offsets = matC.row_offsets; matOut.data = matC.data; } template <typename DataType, int BLOCKS_PER_SM, int THREADS_PER_BLOCK, int MAX_DYNAMIC_SHARED, int MAX_STATIC_SHARED> void MultiplyspECK(const dCSR<DataType> &A, const dCSR<DataType> &B, dCSR<DataType> &matOut, spECKConfig &config, Timings &timings) { MultiplyspECKImplementation<DataType, BLOCKS_PER_SM, THREADS_PER_BLOCK, MAX_DYNAMIC_SHARED, MAX_STATIC_SHARED>(A, B, matOut, config, timings); } template void MultiplyspECK<float, 4, 1024, spECK_DYNAMIC_MEM_PER_BLOCK, spECK_STATIC_MEM_PER_BLOCK>(const dCSR<float> &A, const dCSR<float> &B, dCSR<float> &matOut, spECKConfig &config, Timings &timings); template void MultiplyspECK<double, 4, 1024, spECK_DYNAMIC_MEM_PER_BLOCK, spECK_STATIC_MEM_PER_BLOCK>(const dCSR<double> &A, const dCSR<double> &B, dCSR<double> &matOut, spECKConfig &config, Timings &timings); } // namespace spECK
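#if 0
// Illustrative sketch (compiled out, not part of spECK's API): the standard two-pass hipcub
// pattern used above to turn per-row nnz counts stored in matC.row_offsets into CSR row offsets
// and to read back the total nnz of C. The function name and parameters are hypothetical;
// d_counts_then_offsets must hold rows + 1 entries and <hipcub/hipcub.hpp> must be available.
static uint32_t buildRowOffsetsExample(uint32_t *d_counts_then_offsets, int rows, hipStream_t stream)
{
    // Pass 1: query how much temporary storage the device scan needs.
    void *d_temp = nullptr;
    size_t tempBytes = 0;
    hipcub::DeviceScan::ExclusiveSum(d_temp, tempBytes, d_counts_then_offsets,
                                     d_counts_then_offsets, rows + 1, stream);

    // Pass 2: allocate the scratch space and run the exclusive scan in place.
    hipMalloc(&d_temp, tempBytes);
    hipcub::DeviceScan::ExclusiveSum(d_temp, tempBytes, d_counts_then_offsets,
                                     d_counts_then_offsets, rows + 1, stream);

    // After an exclusive scan over rows + 1 entries, the last offset equals the total nnz of C.
    uint32_t nnz = 0;
    hipMemcpy(&nnz, d_counts_then_offsets + rows, sizeof(uint32_t), hipMemcpyDeviceToHost);
    hipFree(d_temp);
    return nnz;
}
#endif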
a30fab9601c7480f5b01a83f0235ece7a30c5ee8.cu
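// Illustrative sketch (comments only): the timing helpers defined below wrap the usual CUDA
// event pattern (cudaEventRecord + cudaEventSynchronize + cudaEventElapsedTime). A typical call
// site, as used throughout MultiplyspECKImplementation further down; `someKernel` and
// `timings.someStage` are hypothetical placeholders:
//
//   startTimerVar(config.individualStart, stream);                 // record + sync start event
//   someKernel<<<blocks, threads, 0, stream>>>(...);               // work being timed
//   timings.someStage = recordTimerVar(config.individualStart,
//                                      config.individualEnd, stream);  // elapsed time in ms
//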
// Global includes #include <bitset> #include <memory> #include <stdlib.h> #include <cuda_runtime.h> #include <math.h> // Local includes #include "Multiply.h" #include "GPU/spECKKernels.h" #include "GPU/consistent_gpu_memory.h" #include "CUDATools/stream.h" #include "meta_utils.h" #include "GPU/spECK_HashSpGEMM.cuh" #include "GPU/spECK_HashLoadBalancer.cuh" #include "GPU/HelperFunctions.cuh" #include <thrust/scan.h> #include <thrust/extrema.h> #include "Config.h" #include "common.h" #include "WorkDistribution.h" #include "HashMap.cuh" #include "spECKConfig.h" using IndexType = uint32_t; namespace spECK { template <typename T> __host__ __forceinline__ T divup(T a, T b) { return (a + b - 1) / b; } void startTimerVar(cudaEvent_t &start, CUstream stream = 0) { HANDLE_ERROR(cudaEventRecord(start, stream)); HANDLE_ERROR(cudaEventSynchronize(start)); } float recordTimerVar(cudaEvent_t &start, cudaEvent_t &end, CUstream stream = 0) { float time; HANDLE_ERROR(cudaEventRecord(end, stream)); HANDLE_ERROR(cudaEventSynchronize(end)); HANDLE_ERROR(cudaEventElapsedTime(&time, start, end)); return time; } template <typename DataType, int BLOCKS_PER_SM, int THREADS_PER_BLOCK, int MAX_DYNAMIC_SHARED, int MAX_STATIC_SHARED> void MultiplyspECKImplementation(const dCSR<DataType> &matA_Dealloc, const dCSR<DataType> &matB_Dealloc, dCSR<DataType> &matOut, spECKConfig &config, Timings &timings) { // those matrices automatically deallocate memory when used as param for cuda -> therefore i have written a new struct without deallocs dCSRNoDealloc<DataType> matA(matA_Dealloc), matB(matB_Dealloc); if (matB.cols > 1 << 27) { printf("ERROR: matrix B has more than %d columns (%lu)\n", 1 << 27, matB.cols); return; } if (matA.rows > 1 << 27) { printf("ERROR: matrix A has more than %d rows (%lu)\n", 1 << 27, matB.rows); return; } if (matA.nnz * matB.nnz == 0) { matOut.nnz = 0; return; } if (MAX_DYNAMIC_SHARED != config.maxDynamicSharedMemoryPerBlock || MAX_STATIC_SHARED != config.maxStaticSharedMemoryPerBlock) { if (MAX_DYNAMIC_SHARED > config.maxDynamicSharedMemoryPerBlock) { printf("ERROR: spECK was compiled with %d maximum dynamic shared memory, but device limit is %d. Please recompile with correct amount set in Multiply.h line 10: spECK_DYNAMIC_MEM_PER_BLOCK\n", MAX_DYNAMIC_SHARED, config.maxDynamicSharedMemoryPerBlock); return; } else { printf("WARNING: spECK was compiled with %d maximum dynamic shared memory, but device limit is %d. Please recompile with correct amount set in Multiply.h line 10: spECK_DYNAMIC_MEM_PER_BLOCK\n", MAX_DYNAMIC_SHARED, config.maxDynamicSharedMemoryPerBlock); } if (MAX_STATIC_SHARED > MAX_DYNAMIC_SHARED) { printf("ERROR: spECK was compiled with smaller dynamic than static shared memory. (%d maximum static shared memory and %d maximum dynamic shared memory). Please check values in Multiply.h line 9 and 10", MAX_STATIC_SHARED, MAX_DYNAMIC_SHARED); return; } if (MAX_STATIC_SHARED > config.maxStaticSharedMemoryPerBlock) { printf("ERROR: spECK was compiled with %d maximum static shared memory, but device limit is %d. Please recompile with correct amount set in Multiply.h line 9: spECK_STATIC_MEM_PER_BLOCK\n", MAX_STATIC_SHARED, config.maxStaticSharedMemoryPerBlock); return; } else if (MAX_STATIC_SHARED < config.maxStaticSharedMemoryPerBlock) { printf("WARNING: spECK was compiled with %d maximum static shared memory, but device limit is %d. 
Please recompile with correct amount set in Multiply.h line 9: spECK_STATIC_MEM_PER_BLOCK\n", MAX_STATIC_SHARED, config.maxStaticSharedMemoryPerBlock); } } // ------------------------------------------------------------------------------------------------------------------------------------------- // Constants and configs // ------------------------------------------------------------------------------------------------------------------------------------------- spECKKernels spgemm(1024); const int kernelCountNumeric = 6; const int kernelCountCounting = 6; const int maxRowsPerBlock = 32; // this value may never exceed 32 because of some warp-optimizations const int warpsCounting = THREADS_PER_BLOCK / 32; const int warpsNumeric = THREADS_PER_BLOCK / 32; const int staticSharedMemPerBlockCounting = 48, staticSharedMemPerBlockNumeric = 24; const int sharedBytesPerWarpCounting = MAX_STATIC_SHARED / warpsCounting - staticSharedMemPerBlockCounting; // 48 byte is the maximum static shared memory per block const int entriesPerWarpCounting = sharedBytesPerWarpCounting / sizeof(IndexType); const int sharedBytesPerBlockCounting = sharedBytesPerWarpCounting * warpsCounting; // CC version > 7.0 support dynamic shared memory larger than static shared const int dynamicSharedBytesPerWarpCounting = MAX_DYNAMIC_SHARED / warpsCounting - staticSharedMemPerBlockCounting; // 48 byte is the maximum static shared memory per block const int dynamicEntriesPerWarpCounting = dynamicSharedBytesPerWarpCounting / sizeof(IndexType); const int dynamicSharedBytesPerBlockCounting = dynamicSharedBytesPerWarpCounting * warpsCounting; const int sharedBytesPerWarpNumeric = MAX_STATIC_SHARED / warpsNumeric - staticSharedMemPerBlockNumeric; // 24 byte is the maximum static shared memory per block const int entriesPerWarpNumeric = sharedBytesPerWarpNumeric / (sizeof(IndexType) + sizeof(DataType)); const int sharedBytesPerBlockNumeric = sharedBytesPerWarpNumeric * warpsNumeric; // CC version > 7.0 support dynamic shared memory larger than static shared const int dynamicSharedBytesPerWarpNumeric = MAX_DYNAMIC_SHARED / warpsNumeric - staticSharedMemPerBlockNumeric; // 24 byte is the maximum static shared memory per block const int dynamicEntriesPerWarpNumeric = dynamicSharedBytesPerWarpNumeric / (sizeof(IndexType) + sizeof(DataType)); const int dynamicSharedBytesPerBlockNumeric = dynamicSharedBytesPerWarpNumeric * warpsNumeric; assert(kernelCountCounting <= kernelCountNumeric); bool supportGlobalFallback = true; const uint32_t minimumDensityForDenseModeCounting = 999; const uint32_t denseModeRowThresholdInternalSorting = 999; const uint32_t denseModeRowThresholdExternalSorting = 18; const uint32_t sm = config.sm; const uint32_t cudaCores = config.sm * BLOCKS_PER_SM * 32; // ------------------------------------------------------------------------------------------------------------------------------------------- // INITIAL MALLOCS // ------------------------------------------------------------------------------------------------------------------------------------------- int estimatedAvgComPerRow = max(1, int((matA.nnz / matA.rows) * (matB.nnz / matB.rows))); // determine how many nnz of matC should be calculated by one block. 
avoid hashmaps running full int maxNnzPerBlockNumeric = entriesPerWarpNumeric * warpsNumeric * 2 / 3; int maxNnzPerBlockNumericDynamicSharedMem = dynamicEntriesPerWarpNumeric * warpsNumeric * 2 / 3; // CUDA variables CUstream stream = config.streams[0]; auto &streams = config.streams; if (timings.measureCompleteTime) startTimerVar(config.completeStart, stream); if (timings.measureAll) startTimerVar(config.individualStart, stream); // Allocate memory for offsets CU::unique_ptr newmat_offsets; if (matOut.rows != matA.rows) { newmat_offsets = CU::allocMemory((matA.rows + 1) * sizeof(IndexType)); } else if (matOut.row_offsets != nullptr) { newmat_offsets.consume(reinterpret_cast<CUdeviceptr>(matOut.row_offsets)); matOut.row_offsets = nullptr; } dCSRNoDealloc<DataType> matC; matC.row_offsets = newmat_offsets.get<IndexType>(); matC.cols = matB.cols; matC.rows = matA.rows; IndexType *blockStartRowsScale = nullptr; IndexType *blockCounterScale = nullptr; IndexType h_blockCounterScaleNumeric[kernelCountNumeric] = {0}; IndexType h_blockCounterScaleCounting[kernelCountCounting] = {0}; size_t cubTempBytesScan = 0; size_t cubTmpBytesReduce = 0; size_t cubTmpBytesActual = 0; void *cubTmp = nullptr; { cub::DeviceScan::ExclusiveSum(cubTmp, cubTempBytesScan, matC.row_offsets, matC.row_offsets, matC.rows + 1); cub::DeviceReduce::Sum(cubTmp, cubTmpBytesReduce, matC.row_offsets, matC.row_offsets, matC.rows); cubTmpBytesReduce = std::max(cubTempBytesScan, cubTmpBytesReduce); } // ---------------------------------------------------------------------------------- uint32_t maxComputationsPerRow = 0; uint32_t longestRowALength = 0; IndexType *d_blockStartRows = nullptr; uint32_t *d_blockCounter = nullptr; uint32_t *d_rowOperations = nullptr; uint32_t *d_rowMaxOperations = nullptr; uint32_t *d_maxElementsPerRow = nullptr; uint32_t *d_sumProducts = nullptr; uint32_t *d_rowColMinMax = nullptr; uint32_t *d_maxComputationsPerRow = nullptr; uint32_t *d_combined_pointers; size_t d_combined_pointers_size = sizeof(uint32_t) * (4 + 2 * matA.rows) + divup(cubTempBytesScan, sizeof(uint32_t)) * sizeof(uint32_t); if (matA.nnz > 10000) d_combined_pointers_size += sizeof(uint32_t) * matA.rows; HANDLE_ERROR(cudaMalloc(&d_combined_pointers, d_combined_pointers_size)); HANDLE_ERROR(cudaMemsetAsync(d_combined_pointers, 0, d_combined_pointers_size)); d_maxElementsPerRow = d_combined_pointers; /* keep this order */ d_sumProducts = &d_maxElementsPerRow[1]; d_maxComputationsPerRow = &d_sumProducts[1]; /* until here */ d_blockCounter = &d_maxComputationsPerRow[1]; d_rowOperations = &d_blockCounter[1]; d_rowMaxOperations = &d_rowOperations[matA.rows]; cubTmp = (void *)&d_rowMaxOperations[matA.rows]; cubTmpBytesActual = cubTempBytesScan; if (matA.nnz > 10000) { d_rowColMinMax = (uint32_t *)cubTmp; d_rowColMinMax = &d_rowColMinMax[divup(cubTempBytesScan, sizeof(uint32_t))]; } if (timings.measureAll) { timings.init = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // COUNT COMPUTATIONS // ------------------------------------------------------------------------------------------------------------------------------------------- uint32_t sumProducts = 0; // calc amount of operations per row { const uint32_t threadsPerBlock = 128U; // limit to threadsPerBlock rows! 
// -> and always try to stay slightly below the threads per block size, because if you are slightly above, it is way more expensive than being far below uint32_t rowsPerBlock = std::min(threadsPerBlock, std::max(1U, (threadsPerBlock - 8) / std::max(1U, uint32_t(matA.nnz / matA.rows)))); rowsPerBlock = std::max(1U, std::min(rowsPerBlock, uint32_t(matA.rows) / (4U * cudaCores / threadsPerBlock))); readOperations<IndexType, DataType, IndexType, threadsPerBlock><<<divup(uint32_t(matA.rows), rowsPerBlock), threadsPerBlock>>>( matA, matB, d_rowOperations, rowsPerBlock, d_maxComputationsPerRow, d_rowColMinMax, d_rowMaxOperations, d_sumProducts); // copying both values at once gives a huge performance boost uint32_t tmpArr[2]; HANDLE_ERROR(cudaMemcpy(&tmpArr, d_sumProducts, sizeof(uint32_t) * 2, cudaMemcpyDeviceToHost)); sumProducts = tmpArr[0]; maxComputationsPerRow = tmpArr[1]; // sumProducts = max(sumProducts, 1); } if (sumProducts == 0) { if (timings.measureCompleteTime) timings.complete = recordTimerVar(config.completeStart, config.completeEnd); matOut.alloc(matA.rows, matB.cols, 0, false); return; } int maxNnzPerBlockCounting = entriesPerWarpCounting * warpsCounting * 4 / 5; int maxNnzPerBlockCountingDynamicSharedMem = dynamicEntriesPerWarpCounting * warpsCounting * 4 / 5; // you always know the maximum size of the output row uint32_t maxRowLength = max(1, min((uint32_t)matB.cols * 12 / 10, maxComputationsPerRow)); if (timings.measureAll) { timings.countProducts = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // LOADBALANCE COUNTING // ------------------------------------------------------------------------------------------------------------------------------------------- uint32_t h_blockCounter = 0; uint32_t rowsPerBlock = 1; if (kernelCountCounting > 5 && maxRowLength < (maxNnzPerBlockCounting >> 4)) { uint32_t maxRowsPerBlockUtilization = max(1, min(uint32_t(maxRowsPerBlock), uint32_t(matA.rows / (sm * BLOCKS_PER_SM << (kernelCountCounting - 2))))); if (maxRowLength < maxNnzPerBlockCounting >> (kernelCountCounting - 1)) { if (estimatedAvgComPerRow / maxRowLength == 1 || maxRowLength / estimatedAvgComPerRow == 1) rowsPerBlock = min(maxRowsPerBlockUtilization, ((maxNnzPerBlockCounting >> (kernelCountCounting - 1)) / 3) / maxRowLength); else rowsPerBlock = min(maxRowsPerBlockUtilization, (maxNnzPerBlockCounting >> kernelCountCounting) / maxRowLength); } rowsPerBlock = max(rowsPerBlock, 1); h_blockCounterScaleCounting[kernelCountCounting - 1] = divup(uint32_t(matA.rows), rowsPerBlock); } else if (kernelCountCounting > 4 && maxRowLength < (maxNnzPerBlockCounting >> 3)) h_blockCounterScaleCounting[4] = matA.rows; else if (kernelCountCounting > 3 && maxRowLength < (maxNnzPerBlockCounting >> 2)) h_blockCounterScaleCounting[3] = matA.rows; else if (kernelCountCounting > 2 && maxRowLength < (maxNnzPerBlockCounting >> 1)) h_blockCounterScaleCounting[2] = matA.rows; else if (kernelCountCounting > 1 && maxRowLength < (maxNnzPerBlockCounting >> 0)) h_blockCounterScaleCounting[1] = matA.rows; else h_blockCounterScaleCounting[0] = matA.rows; uint32_t rowsRequiringGlobal = h_blockCounterScaleCounting[0]; uint32_t actualKernelCount = min(kernelCountCounting, uint32_t( std::log2( divup( int(maxRowLength), min( maxNnzPerBlockCounting >> (kernelCountCounting - 1), maxNnzPerBlockNumeric >> 
(kernelCountNumeric - 1)))) + 1)); bool useLoadBalancingCounting = false; // TODO check if && maxComputationsPerRow > maxNnzPerBlockCounting / 8 can be removed if (matA.nnz > 771843 || maxComputationsPerRow < maxNnzPerBlockCountingDynamicSharedMem && maxComputationsPerRow > (maxNnzPerBlockCounting >> 2) && matA.rows > 7575 || maxComputationsPerRow > maxNnzPerBlockCountingDynamicSharedMem && sumProducts > 1940177 || maxComputationsPerRow / max(1, int((sumProducts / matA.rows))) > 110 && sumProducts > 1164708) useLoadBalancingCounting = true; if (useLoadBalancingCounting) { size_t combinedBlockStartSize = sizeof(IndexType) * (1 + kernelCountCounting + matA.rows * (1 + actualKernelCount)); HANDLE_ERROR(cudaMalloc(&d_blockStartRows, combinedBlockStartSize)); blockStartRowsScale = &d_blockStartRows[matA.rows + 1]; blockCounterScale = &blockStartRowsScale[actualKernelCount * matA.rows]; HANDLE_ERROR(cudaMemset(blockCounterScale, 0, sizeof(IndexType) * kernelCountCounting)); // load balance over amount of operations per row in A spgemm.h_AssignHashSpGEMMBlocksToRowsOfSameSizeOperations<uint32_t, DataType, uint8_t, kernelCountCounting>( matA, matB, d_rowOperations, blockStartRowsScale, blockCounterScale, h_blockCounterScaleCounting, d_blockStartRows, maxNnzPerBlockCounting, maxNnzPerBlockCountingDynamicSharedMem, maxRowsPerBlock, actualKernelCount, rowsRequiringGlobal); } else { h_blockCounter = matA.rows; d_blockStartRows = nullptr; } if (timings.measureAll) { timings.loadBalanceCounting = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // ALLOCATE GLOBAL MAPS // ------------------------------------------------------------------------------------------------------------------------------------------- int elementsPerMap = (std::max(maxRowLength, uint32_t(maxNnzPerBlockCountingDynamicSharedMem)) * 5) / 4; supportGlobalFallback &= maxRowLength > entriesPerWarpCounting * warpsCounting; typedef HashMap<uint32_t, DataType> GlobalMap; typedef HashMapNoValue<uint32_t, 1> GlobalMapRowOffsets; typedef HashMapNoValue<uint32_t, maxRowsPerBlock> GlobalMapNoValue; void *hashMaps = nullptr; IndexType *maps_indices = nullptr; DataType *maps_values = nullptr; uint32_t hashMapCount = 0; size_t globalMapMaxSize; globalMapMaxSize = std::max(sizeof(GlobalMap), sizeof(GlobalMapNoValue)); globalMapMaxSize = std::max(globalMapMaxSize, sizeof(GlobalMapRowOffsets)); if (supportGlobalFallback) { hashMapCount = std::min(sm * BLOCKS_PER_SM, h_blockCounterScaleCounting[0]); hashMapCount = std::min(hashMapCount, rowsRequiringGlobal); supportGlobalFallback &= hashMapCount > 0; } rowsRequiringGlobal = matB.cols < entriesPerWarpCounting * warpsCounting ? 
0 : rowsRequiringGlobal; bool isDenseCounting = useLoadBalancingCounting && rowsRequiringGlobal > 0 && maxComputationsPerRow > maxNnzPerBlockCountingDynamicSharedMem * 2; if (isDenseCounting) { supportGlobalFallback = false; // every bit is one column if (matB.cols > (warpsCounting * sharedBytesPerWarpCounting * 8) / 2) { if (longestRowALength == 0) { uint32_t *d_longestRowALength = nullptr; HANDLE_ERROR(cudaMalloc(&d_longestRowALength, sizeof(uint32_t))); HANDLE_ERROR(cudaMemset(d_longestRowALength, 0, sizeof(uint32_t))); const uint32_t blockdim = 256; const uint32_t rowsPerThread = 2; const uint32_t blocks = divup(IndexType(matA.rows), blockdim * rowsPerThread); getLongestRowA<IndexType, blockdim, rowsPerThread><<<blocks, blockdim>>>(matA.row_offsets, d_longestRowALength, matA.rows, matA.nnz); cudaMemcpy(&longestRowALength, d_longestRowALength, sizeof(uint32_t), cudaMemcpyDeviceToHost); } // only use global maps if the row cursors can't be held in shared memory if (elementsPerMap * 2 > warpsCounting * entriesPerWarpCounting) { hashMapCount = sm * BLOCKS_PER_SM; elementsPerMap = longestRowALength * 5 / 4; if (maps_indices != nullptr) HANDLE_ERROR(cudaFree(maps_indices)); if (hashMaps != nullptr) HANDLE_ERROR(cudaFree(hashMaps)); HANDLE_ERROR(cudaMalloc(&maps_indices, sizeof(uint32_t) * hashMapCount * (elementsPerMap + maxRowsPerBlock + 1))); HANDLE_ERROR(cudaMalloc(&hashMaps, globalMapMaxSize * hashMapCount)); spgemm.setLaunchDimensions(hashMapCount, streams[0], 32 * warpsNumeric); spgemm.h_InitializeGlobalMapsNoVal<GlobalMapRowOffsets, uint32_t>((GlobalMapRowOffsets *)hashMaps, hashMapCount, maps_indices, elementsPerMap, maxRowsPerBlock); } } } if (supportGlobalFallback) { HANDLE_ERROR(cudaMalloc(&hashMaps, globalMapMaxSize * hashMapCount)); HANDLE_ERROR(cudaMalloc(&maps_indices, sizeof(IndexType) * hashMapCount * (elementsPerMap + maxRowsPerBlock + 1))); spgemm.setLaunchDimensions(hashMapCount, streams[0], 32 * warpsCounting); spgemm.h_InitializeGlobalMapsNoVal<GlobalMapNoValue, IndexType>((GlobalMapNoValue *)hashMaps, hashMapCount, maps_indices, elementsPerMap, maxRowsPerBlock); } if (timings.measureAll) { timings.globalMapsCounting = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // PRE-COUNTING LOAD-OPTIMIZATION // ------------------------------------------------------------------------------------------------------------------------------------------- IndexType blockPrefixScaled[kernelCountCounting] = {0}; { uint32_t activeSM = h_blockCounterScaleCounting[0]; // never go up to top level int firstXEmpty = h_blockCounterScaleCounting[0] == 0; bool foundFirstNonEmpty = h_blockCounterScaleCounting[0] != 0; for (int i = 1; i < kernelCountCounting; ++i) { blockPrefixScaled[i] = h_blockCounterScaleCounting[i - 1] + blockPrefixScaled[i - 1]; activeSM += 2 * h_blockCounterScaleCounting[i] >> (i - 1); if (!foundFirstNonEmpty) { if (h_blockCounterScaleCounting[i] == 0) firstXEmpty++; else foundFirstNonEmpty = true; } } // avoid div by zero activeSM = max(activeSM, 1); if (activeSM < sm * BLOCKS_PER_SM) { int shiftUp = min(firstXEmpty, int(std::log2(sm * BLOCKS_PER_SM / activeSM))); if (shiftUp > 0) { for (int i = 0; i < kernelCountCounting; i++) { if (i + shiftUp < kernelCountCounting) { h_blockCounterScaleCounting[i] = h_blockCounterScaleCounting[i + shiftUp]; blockPrefixScaled[i] 
= blockPrefixScaled[i + shiftUp]; } else { h_blockCounterScaleCounting[i] = 0; blockPrefixScaled[i] = h_blockCounter; } } } } } // ------------------------------------------------------------------------------------------------------------------------------------------- // COUNT NNZ PER ROW OF C // ------------------------------------------------------------------------------------------------------------------------------------------- { if (h_blockCounterScaleCounting[0] > 0) { if (isDenseCounting) { // this only uses 1 block per sm and therefore hash 50% occupancy, but better caching spgemm.setLaunchDimensions(h_blockCounterScaleCounting[0], streams[0], (32 * warpsCounting >> 0), dynamicSharedBytesPerBlockCounting); spgemm.h_DenseSpGEMMCount<IndexType, DataType, GlobalMapRowOffsets, dynamicSharedBytesPerBlockCounting, true, (32 * warpsCounting >> 0)>( matA, matB, (GlobalMapRowOffsets *)hashMaps, hashMapCount, matC.row_offsets, d_blockStartRows + blockPrefixScaled[0], d_rowOperations, h_blockCounterScaleCounting[0], d_rowColMinMax, d_rowMaxOperations, d_maxElementsPerRow, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[0], streams[0], 32 * warpsCounting >> 0, dynamicSharedBytesPerBlockCounting); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, dynamicSharedBytesPerBlockCounting, true, (32 * warpsCounting >> 0)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[0], h_blockCounterScaleCounting[0], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } } if (kernelCountCounting > 1 && h_blockCounterScaleCounting[1] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[1], streams[1], 32 * warpsCounting >> 0, sharedBytesPerBlockCounting >> 0); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 0), false, (32 * warpsCounting >> 0)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[1], h_blockCounterScaleCounting[1], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } if (kernelCountCounting > 2 && h_blockCounterScaleCounting[2] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[2], streams[2], (32 * warpsCounting >> 1), sharedBytesPerBlockCounting >> 1); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 1), false, (32 * warpsCounting >> 1)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[2], h_blockCounterScaleCounting[2], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } if (kernelCountCounting > 3 && h_blockCounterScaleCounting[3] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[3], streams[3], (32 * warpsCounting >> 2), sharedBytesPerBlockCounting >> 2); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 2), false, (32 * warpsCounting >> 2)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + 
blockPrefixScaled[3], h_blockCounterScaleCounting[3], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } if (kernelCountCounting > 4 && h_blockCounterScaleCounting[4] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[4], streams[4], 32 * warpsCounting >> 3, sharedBytesPerBlockCounting >> 3); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 3), false, (32 * warpsCounting >> 3)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[4], h_blockCounterScaleCounting[4], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } if (kernelCountCounting > 5 && h_blockCounterScaleCounting[5] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleCounting[5], streams[5], 32 * warpsCounting >> 4, sharedBytesPerBlockCounting >> 4); spgemm.h_SpGEMMCountLauncher<IndexType, DataType, maxRowsPerBlock, GlobalMapNoValue, GlobalMapRowOffsets, (sharedBytesPerBlockCounting >> 4), false, (32 * warpsCounting >> 4)>( matA, matB, (GlobalMapNoValue *)hashMaps, hashMapCount, nullptr, 0, matC.row_offsets, d_rowOperations, d_blockStartRows + blockPrefixScaled[5], h_blockCounterScaleCounting[5], d_rowColMinMax, d_rowMaxOperations, minimumDensityForDenseModeCounting, d_maxElementsPerRow, rowsPerBlock); } } // ------------------------------------------------------------------------------------------------------------------------------------------- // SCAN ROW OFFSETS AND GET NNZ OF C // ------------------------------------------------------------------------------------------------------------------------------------------- // now we need to allocate that memory for prefix scan and for finding the longest row if (cubTmpBytesActual < cubTempBytesScan) { cubTmpBytesActual = cubTempBytesScan; if (cubTmp != nullptr) HANDLE_ERROR(cudaFree(cubTmp)); HANDLE_ERROR(cudaMalloc(&cubTmp, cubTmpBytesActual)); } // prefix sum to get the starting ids of each row of mat C cub::DeviceScan::ExclusiveSum(cubTmp, cubTmpBytesActual, matC.row_offsets, matC.row_offsets, matC.rows + 1); { IndexType nnz; cudaMemcpy(&nnz, matC.row_offsets + matC.rows, sizeof(IndexType), cudaMemcpyDeviceToHost); matC.nnz = nnz; } if (timings.measureAll) { HANDLE_ERROR(cudaDeviceSynchronize()); timings.spGEMMCounting = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // ALLOCATE OUTPUT MATRIX C // ------------------------------------------------------------------------------------------------------------------------------------------- // only allocate mem for mat C if size is not correct if (matOut.nnz != matC.nnz) { matOut.alloc(matC.rows, matC.cols, matC.nnz, false); } if (matOut.data == nullptr || matOut.col_ids == nullptr) { if (matOut.nnz > 0) printf("ERROR: out of memory\n"); return; } matOut.row_offsets = std::move(newmat_offsets.getRelease<IndexType>()); matC = dCSRNoDealloc<DataType>(matOut); if (timings.measureAll) { timings.allocC = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // 
------------------------------------------------------------------------------------------------------------------------------------------- // LOAD BALANCE NUMERIC // ------------------------------------------------------------------------------------------------------------------------------------------- uint32_t maxElementsPerRow = maxRowLength; cudaMemcpy(&maxElementsPerRow, d_maxElementsPerRow, sizeof(uint32_t), cudaMemcpyDeviceToHost); bool reprocessLoadBalanceNumeric = useLoadBalancingCounting; rowsPerBlock = 1; // get the longest row in order to minimize the global map size which needs to be allocated if (kernelCountNumeric > 5 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 4)) { uint32_t maxRowsPerBlockUtilization = max(1, min(uint32_t(maxRowsPerBlock), uint32_t(matA.rows / (sm * BLOCKS_PER_SM << (kernelCountNumeric - 2))))); if (maxElementsPerRow<(entriesPerWarpNumeric * warpsNumeric)>> kernelCountNumeric) { if (maxElementsPerRow / max(1U, uint32_t(matC.nnz / matC.rows)) == 1) rowsPerBlock = min(maxRowsPerBlockUtilization, (maxNnzPerBlockNumeric >> (kernelCountNumeric - 1)) / maxElementsPerRow); else rowsPerBlock = min(maxRowsPerBlockUtilization, (entriesPerWarpNumeric * warpsNumeric >> (kernelCountNumeric - 1)) / maxElementsPerRow); } rowsPerBlock = max(rowsPerBlock, 1); h_blockCounterScaleNumeric[kernelCountNumeric - 1] = divup(uint32_t(matA.rows), rowsPerBlock); } else if (kernelCountNumeric > 4 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 3)) h_blockCounterScaleNumeric[4] = matC.rows; else if (kernelCountNumeric > 3 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 2)) h_blockCounterScaleNumeric[3] = matC.rows; else if (kernelCountNumeric > 2 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 1)) h_blockCounterScaleNumeric[2] = matC.rows; else if (kernelCountNumeric > 1 && maxElementsPerRow < (maxNnzPerBlockNumeric >> 0)) h_blockCounterScaleNumeric[1] = matC.rows; else h_blockCounterScaleNumeric[0] = matC.rows; supportGlobalFallback = true; supportGlobalFallback &= maxElementsPerRow >= maxNnzPerBlockNumericDynamicSharedMem; rowsRequiringGlobal = h_blockCounterScaleNumeric[0]; uint32_t avgElementsPerRow = max(1, int(matC.nnz / matC.rows)); uint32_t maxAvgElementsPerRowRatio = maxElementsPerRow / avgElementsPerRow; reprocessLoadBalanceNumeric = false; if (maxElementsPerRow > (maxNnzPerBlockNumeric >> 2) && matA.rows >= 1236 && sumProducts > 636293 || maxElementsPerRow > (maxNnzPerBlockNumeric >> (kernelCountNumeric - 1)) && ( maxAvgElementsPerRowRatio > 4 && sumProducts > 4921876 || maxAvgElementsPerRowRatio > 13 && sumProducts > 385847 || maxAvgElementsPerRowRatio > 18 && sumProducts > 26263 && avgElementsPerRow > 22 || maxAvgElementsPerRowRatio > 146)) reprocessLoadBalanceNumeric = true; // can bring a performance benefit for some matrices, but has small overhead if (reprocessLoadBalanceNumeric && matC.nnz > 0) { if (d_blockCounter == nullptr) { HANDLE_ERROR(cudaMalloc(&d_blockCounter, sizeof(uint32_t))); } if (blockCounterScale == nullptr) { size_t combinedBlockStartSize = sizeof(IndexType) * (1 + kernelCountNumeric + matA.rows * (1 + actualKernelCount)); HANDLE_ERROR(cudaMalloc(&d_blockStartRows, combinedBlockStartSize)); blockStartRowsScale = &d_blockStartRows[matA.rows + 1]; blockCounterScale = &blockStartRowsScale[actualKernelCount * matA.rows]; } // reset buffers HANDLE_ERROR(cudaMemsetAsync(d_blockCounter, 0, sizeof(uint32_t))); HANDLE_ERROR(cudaMemsetAsync(blockCounterScale, 0, sizeof(IndexType) * kernelCountNumeric)); 
spgemm.h_AssignHashSpGEMMBlocksToRowsOfSameSize<IndexType, DataType, uint8_t, kernelCountNumeric>( matC, blockStartRowsScale, d_blockStartRows, blockCounterScale, h_blockCounterScaleNumeric, maxNnzPerBlockNumeric, maxNnzPerBlockNumericDynamicSharedMem, maxRowsPerBlock, actualKernelCount, rowsRequiringGlobal); } else { HANDLE_ERROR(cudaFree(d_blockStartRows)); d_blockStartRows = nullptr; } if (timings.measureAll) { timings.loadBalanceNumeric = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // ALLOCATE GLOBAL MAPS // ------------------------------------------------------------------------------------------------------------------------------------------- // always disabled since we always use dense mode for large rows supportGlobalFallback = false; if (supportGlobalFallback) { // update elements per map now that we know the lengths of each row --> could save some global memory and therefore allocation time elementsPerMap = max(maxElementsPerRow, maxNnzPerBlockNumericDynamicSharedMem) * 3 / 2; supportGlobalFallback &= h_blockCounterScaleNumeric[0] > 0; hashMapCount = min(sm * BLOCKS_PER_SM, h_blockCounterScaleNumeric[0]); hashMapCount = min(hashMapCount, rowsRequiringGlobal); supportGlobalFallback &= hashMapCount > 0; } rowsRequiringGlobal = matB.cols < entriesPerWarpNumeric * warpsNumeric ? 0 : rowsRequiringGlobal; bool isDenseOutput = h_blockCounterScaleNumeric[0] > 0; GlobalMapRowOffsets *rowOffsetMaps = nullptr; IndexType *rowOffsetMapIndices = nullptr; uint32_t rowOffsetMapCount = 0; uint32_t rowOffsetMapElementsPer = 0; if (isDenseOutput) { if (longestRowALength == 0) { uint32_t *d_longestRowALength = nullptr; HANDLE_ERROR(cudaMalloc(&d_longestRowALength, sizeof(uint32_t))); HANDLE_ERROR(cudaMemset(d_longestRowALength, 0, sizeof(uint32_t))); const uint32_t _threads = 256; const uint32_t rowsPerThread = 2; const uint32_t blocks = divup(IndexType(matA.rows), _threads * rowsPerThread); getLongestRowA<IndexType, _threads, rowsPerThread><<<blocks, _threads>>>(matA.row_offsets, d_longestRowALength, matA.rows, matA.nnz); cudaMemcpy(&longestRowALength, d_longestRowALength, sizeof(uint32_t), cudaMemcpyDeviceToHost); } rowOffsetMapElementsPer = longestRowALength; rowOffsetMapCount = min(h_blockCounterScaleNumeric[0], sm * BLOCKS_PER_SM); // only allocate global maps if row cursors can't be held in share memory if (elementsPerMap * 2 * sizeof(IndexType) > warpsNumeric * entriesPerWarpNumeric * (sizeof(IndexType) + sizeof(DataType))) { if (h_blockCounterScaleNumeric[0] != 0) { if (rowOffsetMaps != nullptr) HANDLE_ERROR(cudaFree(rowOffsetMaps)); HANDLE_ERROR(cudaMalloc(&rowOffsetMaps, globalMapMaxSize * rowOffsetMapCount)); if (rowOffsetMapIndices != nullptr) { HANDLE_ERROR(cudaFree(rowOffsetMapIndices)); rowOffsetMapIndices = nullptr; } if (rowOffsetMapIndices == nullptr) HANDLE_ERROR(cudaMalloc(&rowOffsetMapIndices, sizeof(IndexType) * rowOffsetMapCount * (rowOffsetMapElementsPer + maxRowsPerBlock + 1))); spgemm.setLaunchDimensions(rowOffsetMapCount, stream, 32 * warpsNumeric); spgemm.h_InitializeGlobalMapsNoVal<GlobalMapRowOffsets, uint32_t>((GlobalMapRowOffsets *)rowOffsetMaps, rowOffsetMapCount, rowOffsetMapIndices, rowOffsetMapElementsPer, maxRowsPerBlock); } } } if (timings.measureAll) { timings.globalMapsNumeric = recordTimerVar(config.individualStart, config.individualEnd, 
stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // PRE-NUMERIC LOAD OPTIMIZATIONS // ------------------------------------------------------------------------------------------------------------------------------------------- // alloc indices for rows which shall be sorted by cub bool sortAllInplace = false; { { uint32_t activeSM = h_blockCounterScaleNumeric[0]; // never go up to top level int firstXEmpty = 0; bool foundFirstNonEmpty = h_blockCounterScaleNumeric[0] != 0; for (int i = 1; i < kernelCountNumeric; ++i) { blockPrefixScaled[i] = h_blockCounterScaleNumeric[i - 1] + blockPrefixScaled[i - 1]; activeSM += 2 * h_blockCounterScaleNumeric[i] >> (i - 1); if (!foundFirstNonEmpty) { if (h_blockCounterScaleNumeric[i] == 0) firstXEmpty++; else foundFirstNonEmpty = true; } } // avoid div by zero activeSM = max(activeSM, 1); if (activeSM < sm * BLOCKS_PER_SM) { int shiftUp = min(firstXEmpty, int(std::log2(sm * BLOCKS_PER_SM / activeSM))); if (shiftUp > 0) { if (firstXEmpty >= 2) sortAllInplace = true; for (int i = 0; i < kernelCountNumeric; i++) { if (i + shiftUp < kernelCountNumeric) { h_blockCounterScaleNumeric[i] = h_blockCounterScaleNumeric[i + shiftUp]; blockPrefixScaled[i] = blockPrefixScaled[i + shiftUp]; } else { h_blockCounterScaleNumeric[i] = 0; blockPrefixScaled[i] = h_blockCounter; } } } } } // inplace starts to be faster if the size of the maps is getting smaller Config::SortModes sortMode = Config::SortModes::CubSegmentedSort; const uint32_t entrySize = sizeof(IndexType) + sizeof(DataType); Config::SpGEMMMethods spGemmMethodNumeric = Config::AutoSpGEMM; // ------------------------------------------------------------------------------------------------------------------------------------------- // NUMERIC SPGEMM // ------------------------------------------------------------------------------------------------------------------------------------------- if (h_blockCounterScaleNumeric[0] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[0], streams[0], 32 * warpsNumeric, dynamicSharedBytesPerBlockNumeric); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, dynamicSharedBytesPerBlockNumeric, true, (32 * warpsNumeric)>( matA, matB, matC, (GlobalMapRowOffsets *)rowOffsetMaps, rowOffsetMapCount, d_blockStartRows, d_rowOperations, h_blockCounterScaleNumeric[0], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } sortMode = sortAllInplace ? Config::InPlace : Config::Separate; bool setSortingBit = sortAllInplace ? 
false : maxElementsPerRow >= 500; if (kernelCountNumeric > 1 && h_blockCounterScaleNumeric[1] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[1], streams[1], (32 * warpsNumeric >> 0), (sharedBytesPerBlockNumeric >> 0)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 0), false, (32 * warpsNumeric >> 0)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[1], d_rowOperations, sortMode, h_blockCounterScaleNumeric[1], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdExternalSorting, setSortingBit, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[1], streams[1], 32 * warpsNumeric >> 0, (sharedBytesPerBlockNumeric >> 0)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 0), false, (32 * warpsNumeric >> 0)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[1], d_rowOperations, h_blockCounterScaleNumeric[1], d_rowColMinMax, d_rowMaxOperations, setSortingBit, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[1], streams[1], 32 * warpsNumeric, (sharedBytesPerBlockNumeric >> 0)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 0), false, (32 * warpsNumeric)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[1], d_rowOperations, sortMode, h_blockCounterScaleNumeric[1], d_rowColMinMax, d_rowMaxOperations, setSortingBit, rowsPerBlock); } } if (kernelCountNumeric > 2 && h_blockCounterScaleNumeric[2] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[2], streams[2], (32 * warpsNumeric >> 1), (sharedBytesPerBlockNumeric >> 1)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 1), false, (32 * warpsNumeric >> 1)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[2], d_rowOperations, sortMode, h_blockCounterScaleNumeric[2], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdExternalSorting, setSortingBit, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[2], streams[2], 32 * warpsNumeric >> 1, (sharedBytesPerBlockNumeric >> 1)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 1), false, (32 * warpsNumeric >> 1)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[2], d_rowOperations, h_blockCounterScaleNumeric[2], d_rowColMinMax, d_rowMaxOperations, setSortingBit, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[2], streams[2], 32 * warpsNumeric >> 1, (sharedBytesPerBlockNumeric >> 1)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 1), false, (32 * warpsNumeric >> 1)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[2], d_rowOperations, sortMode, h_blockCounterScaleNumeric[2], d_rowColMinMax, d_rowMaxOperations, setSortingBit, rowsPerBlock); } } sortMode = Config::InPlace; if (kernelCountNumeric > 3 && 
h_blockCounterScaleNumeric[3] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[3], streams[3], (32 * warpsNumeric >> 2), (sharedBytesPerBlockNumeric >> 2)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 2), false, (32 * warpsNumeric >> 2)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[3], d_rowOperations, sortMode, h_blockCounterScaleNumeric[3], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdInternalSorting, false, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[3], streams[3], 32 * warpsNumeric >> 2, (sharedBytesPerBlockNumeric >> 2)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 2), false, (32 * warpsNumeric >> 2)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[3], d_rowOperations, h_blockCounterScaleNumeric[3], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[3], streams[3], 32 * warpsNumeric >> 2, (sharedBytesPerBlockNumeric >> 2)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 2), false, (32 * warpsNumeric >> 2)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[3], d_rowOperations, sortMode, h_blockCounterScaleNumeric[3], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } } if (kernelCountNumeric > 4 && h_blockCounterScaleNumeric[4] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[4], streams[4], (32 * warpsNumeric >> 3), (sharedBytesPerBlockNumeric >> 3)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 3), false, (32 * warpsNumeric >> 3)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[4], d_rowOperations, sortMode, h_blockCounterScaleNumeric[4], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdInternalSorting, false, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[4], streams[4], 32 * warpsNumeric >> 3, (sharedBytesPerBlockNumeric >> 3)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 3), false, (32 * warpsNumeric >> 3)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[4], d_rowOperations, h_blockCounterScaleNumeric[4], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[4], streams[4], 32 * warpsNumeric >> 3, (sharedBytesPerBlockNumeric >> 3)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 3), false, (32 * warpsNumeric >> 3)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[4], d_rowOperations, sortMode, h_blockCounterScaleNumeric[4], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } } if (kernelCountNumeric > 5 && h_blockCounterScaleNumeric[5] > 0) { if (spGemmMethodNumeric == Config::AutoSpGEMM || ((rowsPerBlock > 1 || reprocessLoadBalanceNumeric) && 
spGemmMethodNumeric != Config::HashSpGEMM)) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[5], streams[5], (32 * warpsNumeric >> 4), (sharedBytesPerBlockNumeric >> 4)); spgemm.h_SpGEMMNumericLauncher<IndexType, DataType, GlobalMap, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 4), false, (32 * warpsNumeric >> 4)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, rowOffsetMaps, rowOffsetMapCount, d_blockStartRows + blockPrefixScaled[5], d_rowOperations, sortMode, h_blockCounterScaleNumeric[5], d_rowColMinMax, d_rowMaxOperations, denseModeRowThresholdInternalSorting, false, rowsPerBlock); } else if (spGemmMethodNumeric == Config::DenseSpGEMM) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[5], streams[5], 32 * warpsNumeric >> 4, (sharedBytesPerBlockNumeric >> 4)); spgemm.h_DenseSpGEMMNumeric<IndexType, DataType, GlobalMapRowOffsets, (sharedBytesPerBlockNumeric >> 4), false, (32 * warpsNumeric >> 4)>( matA, matB, matC, (GlobalMapRowOffsets *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[5], d_rowOperations, h_blockCounterScaleNumeric[5], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } else { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[5], streams[5], 32 * warpsNumeric >> 4, (sharedBytesPerBlockNumeric >> 4)); spgemm.h_HashSpGEMMNumeric<IndexType, DataType, GlobalMap, (sharedBytesPerBlockNumeric >> 4), false, (32 * warpsNumeric >> 4)>( matA, matB, matC, (GlobalMap *)hashMaps, hashMapCount, d_blockStartRows + blockPrefixScaled[5], d_rowOperations, sortMode, h_blockCounterScaleNumeric[5], d_rowColMinMax, d_rowMaxOperations, false, rowsPerBlock); } } } if (timings.measureAll) { HANDLE_ERROR(cudaDeviceSynchronize()); timings.spGEMMNumeric = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // SORT MEDIUM AND LONG ROWS // ------------------------------------------------------------------------------------------------------------------------------------------- if (!sortAllInplace && (h_blockCounterScaleNumeric[1] + h_blockCounterScaleNumeric[2] > 0) && maxElementsPerRow >= 500) { if (h_blockCounterScaleNumeric[2] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[2], streams[2], 32 * warpsNumeric / 4); spgemm.h_HashSpGEMMSorting<uint32_t, DataType, 32 * warpsNumeric / 4, entriesPerWarpNumeric * 32 / 2>( matC, d_blockStartRows + blockPrefixScaled[2], h_blockCounterScaleNumeric[2], true); } if (h_blockCounterScaleNumeric[1] > 0) { spgemm.setLaunchDimensions(h_blockCounterScaleNumeric[1], streams[1], 32 * warpsNumeric / 2); spgemm.h_HashSpGEMMSorting<uint32_t, DataType, 32 * warpsNumeric / 2, entriesPerWarpNumeric * 32>( matC, d_blockStartRows + blockPrefixScaled[1], h_blockCounterScaleNumeric[1], true); } } if (timings.measureAll) { HANDLE_ERROR(cudaDeviceSynchronize()); timings.sorting = recordTimerVar(config.individualStart, config.individualEnd, stream); startTimerVar(config.individualStart, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // FREE ALLOCATED MEMORY // ------------------------------------------------------------------------------------------------------------------------------------------- if (d_blockStartRows != nullptr) HANDLE_ERROR(cudaFree(d_blockStartRows)); if (hashMaps != nullptr) 
HANDLE_ERROR(cudaFree(hashMaps)); if (maps_indices != nullptr) HANDLE_ERROR(cudaFree(maps_indices)); if (maps_values != nullptr) HANDLE_ERROR(cudaFree(maps_values)); if (d_combined_pointers != nullptr) HANDLE_ERROR(cudaFree(d_combined_pointers)); if (rowOffsetMaps != nullptr) HANDLE_ERROR(cudaFree(rowOffsetMaps)); if (rowOffsetMapIndices != nullptr) HANDLE_ERROR(cudaFree(rowOffsetMapIndices)); if (timings.measureAll) { timings.cleanup = recordTimerVar(config.individualStart, config.individualEnd, stream); } // ------------------------------------------------------------------------------------------------------------------------------------------- // END // ------------------------------------------------------------------------------------------------------------------------------------------- if (timings.measureCompleteTime) { HANDLE_ERROR(cudaDeviceSynchronize()); timings.complete = recordTimerVar(config.completeStart, config.completeEnd, stream); } if (timings.measureAll) { /*printf("elements per global map=%d. mapCount=%d\n", elementsPerMap, hashMapCount); printf("matCNnz=%d, number of blocks = %d, %d, %d, %d, %d, %d\n", matC.nnz, h_blockCounterScaleNumeric[0], kernelCountNumeric > 1 ? h_blockCounterScaleNumeric[1] : -1, kernelCountNumeric > 2 ? h_blockCounterScaleNumeric[2] : -1, kernelCountNumeric > 3 ? h_blockCounterScaleNumeric[3] : -1, kernelCountNumeric > 4 ? h_blockCounterScaleNumeric[4] : -1, kernelCountNumeric > 5 ? h_blockCounterScaleNumeric[5] : -1);*/ if (timings.measureAll) { printf("spECK initial mallocs = %f ms\n", timings.init); printf("spECK count computations = %f ms\n", timings.countProducts); printf("spECK load-balancer = %f ms\n", timings.loadBalanceCounting); printf("spECK GlobalMaps Cnt = %f ms\n", timings.globalMapsCounting); printf("spECK counting kernel = %f ms\n", timings.spGEMMCounting); printf("spECK malloc mat C = %f ms\n", timings.allocC); printf("spECK num load-balancer = %f ms\n", timings.loadBalanceNumeric); printf("spECK init GlobalMaps = %f ms\n", timings.globalMapsNumeric); printf("spECK numeric kernel = %f ms\n", timings.spGEMMNumeric); printf("spECK Sorting kernel = %f ms\n", timings.sorting); printf("spECK cleanup = %f ms\n", timings.cleanup); printf("--------------------------------------------------------------\n"); } if (timings.measureCompleteTime) printf("spECK complete = %f ms\n\n", timings.complete); } matOut.rows = matC.rows; matOut.cols = matC.cols; matOut.nnz = matC.nnz; matOut.col_ids = matC.col_ids; matOut.row_offsets = matC.row_offsets; matOut.data = matC.data; } template <typename DataType, int BLOCKS_PER_SM, int THREADS_PER_BLOCK, int MAX_DYNAMIC_SHARED, int MAX_STATIC_SHARED> void MultiplyspECK(const dCSR<DataType> &A, const dCSR<DataType> &B, dCSR<DataType> &matOut, spECKConfig &config, Timings &timings) { MultiplyspECKImplementation<DataType, BLOCKS_PER_SM, THREADS_PER_BLOCK, MAX_DYNAMIC_SHARED, MAX_STATIC_SHARED>(A, B, matOut, config, timings); } template void MultiplyspECK<float, 4, 1024, spECK_DYNAMIC_MEM_PER_BLOCK, spECK_STATIC_MEM_PER_BLOCK>(const dCSR<float> &A, const dCSR<float> &B, dCSR<float> &matOut, spECKConfig &config, Timings &timings); template void MultiplyspECK<double, 4, 1024, spECK_DYNAMIC_MEM_PER_BLOCK, spECK_STATIC_MEM_PER_BLOCK>(const dCSR<double> &A, const dCSR<double> &B, dCSR<double> &matOut, spECKConfig &config, Timings &timings); } // namespace spECK
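// Aside (illustrative sketch, not part of the spECK source above): the counting phase converts the
// per-row non-zero counts of C into CSR row offsets with cub::DeviceScan::ExclusiveSum and then reads
// the total nnz back from the last offset. The helper below shows that standard two-phase CUB pattern
// in isolation; the function and buffer names are hypothetical, and the temporary storage is sized by
// CUB's query call here, whereas spECK reuses a pre-sized buffer (cubTmp / cubTempBytesScan).
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdint>

// d_row_offsets has rows + 1 entries; entries [0, rows) hold the per-row counts on entry.
inline void buildRowOffsets(uint32_t *d_row_offsets, int rows, uint32_t &nnz)
{
    void  *d_temp    = nullptr;
    size_t tempBytes = 0;
    // 1st call: d_temp == nullptr, so CUB only reports the required temporary storage size.
    cub::DeviceScan::ExclusiveSum(d_temp, tempBytes, d_row_offsets, d_row_offsets, rows + 1);
    cudaMalloc(&d_temp, tempBytes);
    // 2nd call: in-place exclusive prefix sum turns the counts into row offsets.
    cub::DeviceScan::ExclusiveSum(d_temp, tempBytes, d_row_offsets, d_row_offsets, rows + 1);
    // The last offset now equals the total number of non-zeros of C.
    cudaMemcpy(&nnz, d_row_offsets + rows, sizeof(uint32_t), cudaMemcpyDeviceToHost);
    cudaFree(d_temp);
}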
12ee1f61ea55744f4e08879fc523854c0460eaf1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"

#define COLUMNS 3
#define ROWS 2

__global__ void add(int *a, int *b, int *c)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    int i = (COLUMNS*y) + x;
    c[i] = a[i] + b[i];
}

int main()
{
    int a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS];
    int *dev_a, *dev_b, *dev_c;

    hipMalloc((void **) &dev_a, ROWS*COLUMNS*sizeof(int));
    hipMalloc((void **) &dev_b, ROWS*COLUMNS*sizeof(int));
    hipMalloc((void **) &dev_c, ROWS*COLUMNS*sizeof(int));

    for (int y = 0; y < ROWS; y++)
        for (int x = 0; x < COLUMNS; x++)
        {
            a[y][x] = x;
            b[y][x] = y;
        }

    hipMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, b, ROWS*COLUMNS*sizeof(int), hipMemcpyHostToDevice);

    dim3 grid(COLUMNS, ROWS);
    hipLaunchKernelGGL((add), dim3(grid), dim3(1), 0, 0, dev_a, dev_b, dev_c);

    hipMemcpy(c, dev_c, ROWS*COLUMNS*sizeof(int), hipMemcpyDeviceToHost);

    for (int y = 0; y < ROWS; y++)
    {
        for (int x = 0; x < COLUMNS; x++)
        {
            printf("[%d][%d]=%d ", y, x, c[y][x]);
        }
        printf("\n");
    }

    return 0;
}
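// Aside (illustrative, not part of the file above): the example checks no return codes and never frees
// its device buffers. A minimal sketch of the usual pattern, using only standard HIP runtime calls;
// the HIP_CHECK macro name is an assumption, not something defined in this repository.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define HIP_CHECK(call)                                                        \
    do {                                                                       \
        hipError_t err_ = (call);                                              \
        if (err_ != hipSuccess) {                                              \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",                       \
                    hipGetErrorString(err_), __FILE__, __LINE__);              \
            exit(1);                                                           \
        }                                                                      \
    } while (0)

// Usage around the launch in main():
//   hipLaunchKernelGGL((add), dim3(grid), dim3(1), 0, 0, dev_a, dev_b, dev_c);
//   HIP_CHECK(hipGetLastError());       // catches launch-configuration errors
//   HIP_CHECK(hipDeviceSynchronize());  // surfaces asynchronous kernel failures
//   ...
//   HIP_CHECK(hipFree(dev_a)); HIP_CHECK(hipFree(dev_b)); HIP_CHECK(hipFree(dev_c));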
12ee1f61ea55744f4e08879fc523854c0460eaf1.cu
#include "stdio.h" #define COLUMNS 3 #define ROWS 2 __global__ void add(int *a, int *b, int *c) { int x = blockIdx.x; int y = blockIdx.y; int i = (COLUMNS*y) + x; c[i] = a[i] + b[i]; } int main() { int a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS]; int *dev_a, *dev_b, *dev_c; cudaMalloc((void **) &dev_a, ROWS*COLUMNS*sizeof(int)); cudaMalloc((void **) &dev_b, ROWS*COLUMNS*sizeof(int)); cudaMalloc((void **) &dev_c, ROWS*COLUMNS*sizeof(int)); for (int y = 0; y < ROWS; y++) for (int x = 0; x < COLUMNS; x++) { a[y][x] = x; b[y][x] = y; } cudaMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, ROWS*COLUMNS*sizeof(int), cudaMemcpyHostToDevice); dim3 grid(COLUMNS,ROWS); add<<<grid,1>>>(dev_a, dev_b, dev_c); cudaMemcpy(c, dev_c, ROWS*COLUMNS*sizeof(int), cudaMemcpyDeviceToHost); for (int y = 0; y < ROWS; y++) { for (int x = 0; x < COLUMNS; x++) { printf("[%d][%d]=%d ",y,x,c[y][x]); } printf("\n"); } return 0; }
a43e04af18a0e279d8096a9078cc9d0b9ebdfb73.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #ifndef PAIRWISE_TRANSFORM_CU #define PAIRWISE_TRANSFORM_CU #ifdef __HIPCC__ #include "../pairwise_transform.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <op_boilerplate.h> #include <helpers/TAD.h> #include <types/float16.h> #include <helpers/DebugHelper.h> #include "../legacy_ops.h" /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ template <typename T, typename opType> __device__ void pairWiseTransformGeneric( T *dx, T *dy, T *params, T *result, Nd4jLong *xShapeInfo, int xRank, Nd4jLong *yShapeInfo, int yRank, Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo) { functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda<opType>( dx, xShapeInfo, dy, yShapeInfo, result, resultShapeInfo, params, allocationPointer, nullptr, tadOnlyShapeInfo); } /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ /* extern "C" __global__ void pairWiseTransformDouble( int opNum, double *dx, double *dy, double *params, double *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<double>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformFloat( int opNum, float *dx, float *dy, float *params, float *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<float>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformHalf( int opNum, float16 *dx, float16 *dy, float16 *params, float16 *result, int *xShapeInfo, int xRank, 
int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<float16>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo); } */ /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ /* template <typename T> __device__ void pairWiseTransformGeneric( int opNum, T *dx, T *dy, T *params, T *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *xIndexes, int *yIndexes, int *resultIndexes, int *allocationPointer, int *tadOnlyShapeInfo) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::pairwise_transforms::PairWiseTransform<T>), sizeof(shape::TAD), xRank); } __syncthreads(); functions::pairwise_transforms::PairWiseTransform<T>::transformCuda( opNum, dx, xShapeInfo, dy, yShapeInfo, result, resultShapeInfo, params, xIndexes, yIndexes, resultIndexes, allocationPointer, manager, tadOnlyShapeInfo); } */ /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ /* extern "C" __global__ void pairWiseTransformDoubleIndex( int opNum, double *dx, double *dy, double *params, double *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *xIndexes, int *yIndexes, int *resultIndexes, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<double>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, xIndexes, yIndexes, resultIndexes, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformFloatIndex( int opNum, float *dx, float *dy, float *params, float *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *xIndexes, int *yIndexes, int *resultIndexes, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<float>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, xIndexes, yIndexes, resultIndexes, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformHalfIndex( int opNum, float16 *dx, float16 *dy, float16 *params, float16 *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *xIndexes, int *yIndexes, int *resultIndexes, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<float16>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, 
zRank, xIndexes, yIndexes, resultIndexes, allocationPointer, tadOnlyShapeInfo); } */ /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ template<typename T, typename opType> __device__ void pairWiseTransformStridedGeneric( Nd4jLong n, T *dx, T *dy, Nd4jLong incx, Nd4jLong incy, T *params, T *result, Nd4jLong incz, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo) { functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda<opType>( n, dx, dy, incx, incy, params, result, incz, allocationPointer, nullptr, tadOnlyShapeInfo); } /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ /* extern "C" __global__ void pairWiseTransformStridedDouble( int opNum, Nd4jLong n, double *dx, double *dy, int incx, int incy, double *params, double *result, int incz, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformStridedGeneric<double>( opNum, n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformStridedFloat( int opNum, Nd4jLong n, float *dx, float *dy, int incx, int incy, float *params, float *result, int incz, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformStridedGeneric<float>( opNum, n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformStridedHalf( int opNum, Nd4jLong n, float16 *dx, float16 *dy, int incx, int incy, float16 *params, float16 *result, int incz, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformStridedGeneric<float16>( opNum, n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo); } */ // pwt shape DISPATCH_KERNEL_SIMPLE(pwtSimpleShaped_, pairWiseTransformGeneric, float, INPUT(float *dx, float *dy, float *params, float *result, Nd4jLong *xShapeInfo, int xRank, Nd4jLong *yShapeInfo, int yRank, Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(pwtSimpleShaped_, pairWiseTransformGeneric, double, INPUT(double *dx, double *dy, double *params, double *result, Nd4jLong *xShapeInfo, int xRank, Nd4jLong *yShapeInfo, int yRank, Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(pwtSimpleShaped_, pairWiseTransformGeneric, float16, INPUT(float16 *dx, float16 *dy, float16 
*params, float16 *result, Nd4jLong *xShapeInfo, int xRank, Nd4jLong *yShapeInfo, int yRank, Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) // pwt strided DISPATCH_KERNEL_SIMPLE(pwtSimpleStrided_, pairWiseTransformStridedGeneric, float, INPUT(Nd4jLong n, float *dx, float *dy, Nd4jLong incx, Nd4jLong incy, float *params, float *result, Nd4jLong incz, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(pwtSimpleStrided_, pairWiseTransformStridedGeneric, double, INPUT(Nd4jLong n, double *dx, double *dy, Nd4jLong incx, Nd4jLong incy, double *params, double *result, Nd4jLong incz, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(pwtSimpleStrided_, pairWiseTransformStridedGeneric, float16, INPUT(Nd4jLong n, float16 *dx, float16 *dy, Nd4jLong incx, Nd4jLong incy, float16 *params, float16 *result, Nd4jLong incz, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) namespace functions { namespace pairwise_transforms { template<> __host__ void PairWiseTransform<float>::execudaCudaStrided(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *dx, Nd4jLong xStride, float *y, Nd4jLong yStride, float *result, Nd4jLong resultStride, float *extraParams, Nd4jLong n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { printf("F4 opNum:[%i]; <<<X: [%i]; Y: [%i]; Z: [%i]>>>\n", opNum, launchDims.x,launchDims.y, launchDims.z); } int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformStridedFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>> ( opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleStrided, float, PARAMS(n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<float16>::execudaCudaStrided(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *dx, Nd4jLong xStride, float16 *y, Nd4jLong yStride, float16 *result, Nd4jLong resultStride, float16 *extraParams, Nd4jLong n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformStridedHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>> ( opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleStrided, float16, 
PARAMS(n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<double>::execudaCudaStrided(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *dx, Nd4jLong xStride, double *y, Nd4jLong yStride, double *result, Nd4jLong resultStride, double *extraParams, Nd4jLong n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformStridedDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>> ( opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleStrided, double, PARAMS(n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<float>::execudaCudaShaped(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *dx, Nd4jLong *xShapeInfo, float *y, Nd4jLong *yShapeInfo, float *result, Nd4jLong *resultShapeInfo, float *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D6 opNum:[%i]\n", opNum); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleShaped, float, PARAMS(dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<float16>::execudaCudaShaped(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *dx, Nd4jLong *xShapeInfo, float16 *y, Nd4jLong *yShapeInfo, float16 *result, Nd4jLong *resultShapeInfo, float16 *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H6 opNum:[%i]\n", opNum); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, 
shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleShaped, float16, PARAMS(dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<double>::execudaCudaShaped(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *dx, Nd4jLong *xShapeInfo, double *y, Nd4jLong *yShapeInfo, double *result, Nd4jLong *resultShapeInfo, double *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H6 opNum:[%i]\n", opNum); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleShaped, double, PARAMS(dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } /* template<typename T> __device__ void PairWiseTransform<T>::transformCuda(const int opNum, Nd4jLong n, T *dx, T *y, int incx, int incy, T *extraParams, T *result, int incz, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM(transformCuda, PARAMS(n, dx, y, incx, incy, extraParams, result, incz, allocationPointer, manager, tadOnlyShapeInfo), PAIRWISE_TRANSFORM_OPS); } template<typename T> __device__ void PairWiseTransform<T>::transformCuda(const int opNum, T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager,int *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM(transformCuda, PARAMS(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, allocationPointer, manager, tadOnlyShapeInfo), PAIRWISE_TRANSFORM_OPS); } */ /* template<typename T> __device__ void PairWiseTransform<T>::transformCuda(const int opNum, T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *resultIndexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM(transform, PARAMS(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, yIndexes, resultIndexes, allocationPointer, manager, tadOnlyShapeInfo), PAIRWISE_TRANSFORM_OPS); } */ /* template<typename T> template<typename OpType> __device__ void PairWiseTransform<T>::transform(T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *resultIndexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { int tid = 
blockIdx.x * blockDim.x + threadIdx.x; Nd4jLong n = shape::length(xShapeBuffer); for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { result[resultIndexes[i]] = OpType::op(dx[indexes[i]],y[yIndexes[i]], extraParams); } } */ /** * */ template<typename T> template<typename OpType> __device__ void PairWiseTransform<T>::transformCuda(T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int xRank; __shared__ int yRank; __shared__ int resultRank; __shared__ int xEWS; __shared__ int yEWS; __shared__ int zEWS; __shared__ char xOrder; __shared__ char yOrder; __shared__ char zOrder; __shared__ bool xRow; __shared__ bool yRow; __shared__ bool zRow; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); yRank = shape::rank(yShapeBuffer); resultRank = shape::rank(resultShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(resultShapeBuffer); xOrder = shape::order(xShapeBuffer); yOrder = shape::order(yShapeBuffer); zOrder = shape::order(resultShapeBuffer); xRow = shape::isRowVector(xShapeBuffer); yRow = shape::isRowVector(yShapeBuffer); zRow = shape::isRowVector(resultShapeBuffer); } __syncthreads(); Nd4jLong n = shape::length(xShapeBuffer); if((xEWS >= 1 && yEWS == xEWS && zEWS == xEWS && xOrder == yOrder && zOrder == xOrder) || (xEWS >= 1 && yEWS == xEWS && zEWS == xEWS && xRow && yRow && zRow)) { // TODO: this is wrong, and should be moved to host side transformCuda<OpType>( n, dx, y, xEWS, yEWS, extraParams, result, zEWS, allocationPointer, manager, tadOnlyShapeInfo); } else { if (dx == result) { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { shape::ind2subC(xRank,shape::shapeOf(xShapeBuffer), i, n, xCoord); shape::ind2subC(yRank,shape::shapeOf(yShapeBuffer), i, n, yCoord); auto xOffset = shape::getOffset(0, shape::shapeOf(xShapeBuffer), shape::stride(xShapeBuffer), xCoord, xRank); auto yOffset = shape::getOffset(0, shape::shapeOf(yShapeBuffer), shape::stride(yShapeBuffer), yCoord, yRank); result[xOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams); } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong resultCoord[MAX_RANK]; for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { shape::ind2subC(xRank,shape::shapeOf(xShapeBuffer), i, n, xCoord); shape::ind2subC(yRank,shape::shapeOf(yShapeBuffer), i, n, yCoord); shape::ind2subC(resultRank,shape::shapeOf(resultShapeBuffer), i, n, resultCoord); auto xOffset = shape::getOffset(0, shape::shapeOf(xShapeBuffer), shape::stride(xShapeBuffer), xCoord, xRank); auto yOffset = shape::getOffset(0, shape::shapeOf(yShapeBuffer), shape::stride(yShapeBuffer), yCoord, yRank); auto resultOffset = shape::getOffset(0, shape::shapeOf(resultShapeBuffer), shape::stride(resultShapeBuffer), resultCoord, resultRank); result[resultOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams); } } } } /* template<typename T> __device__ void transform( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, Nd4jLong n, int *indexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { transform(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, indexes, indexes, allocationPointer, 
manager, tadOnlyShapeInfo); } template<typename T> __device__ void transform( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { transform(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, yIndexes, indexes, allocationPointer, manager, tadOnlyShapeInfo); } */ /** * * @param n * @param xOffset * @param yOffset * @param resultOffset * @param dx * @param dy * @param incx * @param incy * @param params * @param result * @param incz * @param blockSize */ template<typename T> template<typename OpType> __device__ void PairWiseTransform<T>::transformCuda( Nd4jLong n, T *dx, T *dy, Nd4jLong incx, Nd4jLong incy, T *params, T *result, Nd4jLong incz,int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (incx == incy && incy == incz && incx == 1) { for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { result[i] = OpType::op(dx[i], dy[i], params); } } else { for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { result[i * incz] = OpType::op(dx[i * incx], dy[i * incy], params); } } } } } #endif // CUDA_CC #endif // PAIRWISE_TRANSFORM_CU
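// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of the original libnd4j file): the
// DISPATCH_SIMPLE / pwtSimpleStrided machinery above ultimately launches, per
// op, a kernel equivalent to the grid-stride loop below. The kernel name and
// the hard-coded addition are hypothetical stand-ins for OpType::op.
// ---------------------------------------------------------------------------
#if defined(__CUDACC__) || defined(__HIPCC__)
template<typename T>
__global__ void examplePairwiseAddStrided(Nd4jLong n,
                                          const T *x, Nd4jLong incx,
                                          const T *y, Nd4jLong incy,
                                          T *z, Nd4jLong incz) {
    // Same indexing scheme as PairWiseTransform<T>::transformCuda(n, ...):
    // each thread starts at its global index and strides by the total number
    // of launched threads until the buffer is exhausted.
    Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x;
    Nd4jLong step = static_cast<Nd4jLong>(gridDim.x) * blockDim.x;
    for (Nd4jLong i = tid; i < n; i += step)
        z[i * incz] = x[i * incx] + y[i * incy]; // OpType::op(x, y, extraParams) with op = addition
}
#endif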
a43e04af18a0e279d8096a9078cc9d0b9ebdfb73.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #ifndef PAIRWISE_TRANSFORM_CU #define PAIRWISE_TRANSFORM_CU #ifdef __CUDACC__ #include "../pairwise_transform.h" #include <cuda.h> #include <cuda_runtime.h> #include <op_boilerplate.h> #include <helpers/TAD.h> #include <types/float16.h> #include <helpers/DebugHelper.h> #include "../legacy_ops.h" /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ template <typename T, typename opType> __device__ void pairWiseTransformGeneric( T *dx, T *dy, T *params, T *result, Nd4jLong *xShapeInfo, int xRank, Nd4jLong *yShapeInfo, int yRank, Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo) { functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda<opType>( dx, xShapeInfo, dy, yShapeInfo, result, resultShapeInfo, params, allocationPointer, nullptr, tadOnlyShapeInfo); } /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ /* extern "C" __global__ void pairWiseTransformDouble( int opNum, double *dx, double *dy, double *params, double *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<double>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformFloat( int opNum, float *dx, float *dy, float *params, float *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<float>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformHalf( int opNum, float16 *dx, float16 *dy, float16 *params, float16 *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int 
*allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<float16>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo); } */ /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ /* template <typename T> __device__ void pairWiseTransformGeneric( int opNum, T *dx, T *dy, T *params, T *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *xIndexes, int *yIndexes, int *resultIndexes, int *allocationPointer, int *tadOnlyShapeInfo) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::pairwise_transforms::PairWiseTransform<T>), sizeof(shape::TAD), xRank); } __syncthreads(); functions::pairwise_transforms::PairWiseTransform<T>::transformCuda( opNum, dx, xShapeInfo, dy, yShapeInfo, result, resultShapeInfo, params, xIndexes, yIndexes, resultIndexes, allocationPointer, manager, tadOnlyShapeInfo); } */ /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ /* extern "C" __global__ void pairWiseTransformDoubleIndex( int opNum, double *dx, double *dy, double *params, double *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *xIndexes, int *yIndexes, int *resultIndexes, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<double>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, xIndexes, yIndexes, resultIndexes, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformFloatIndex( int opNum, float *dx, float *dy, float *params, float *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *xIndexes, int *yIndexes, int *resultIndexes, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<float>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, xIndexes, yIndexes, resultIndexes, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformHalfIndex( int opNum, float16 *dx, float16 *dy, float16 *params, float16 *result, int *xShapeInfo, int xRank, int *yShapeInfo, int yRank, int *resultShapeInfo, int zRank, int *xIndexes, int *yIndexes, int *resultIndexes, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformGeneric<float16>( opNum, dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, xIndexes, yIndexes, resultIndexes, allocationPointer, 
tadOnlyShapeInfo); } */ /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ template<typename T, typename opType> __device__ void pairWiseTransformStridedGeneric( Nd4jLong n, T *dx, T *dy, Nd4jLong incx, Nd4jLong incy, T *params, T *result, Nd4jLong incz, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo) { functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda<opType>( n, dx, dy, incx, incy, params, result, incz, allocationPointer, nullptr, tadOnlyShapeInfo); } /** * The api for the driver interface * @param opNum the op number * @param n the length of the problem * @param xOffset the offset for x * @param yOffset the offset for y * @param resultOffset the offset for result * @param dx the input * @param dy the pair wise array * @param incx the stride for x * @param incy the stride for y * @param params the parameters for the problem * @param result the result buffer * @param incz the result stride * @param blockSize the block size */ /* extern "C" __global__ void pairWiseTransformStridedDouble( int opNum, Nd4jLong n, double *dx, double *dy, int incx, int incy, double *params, double *result, int incz, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformStridedGeneric<double>( opNum, n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformStridedFloat( int opNum, Nd4jLong n, float *dx, float *dy, int incx, int incy, float *params, float *result, int incz, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformStridedGeneric<float>( opNum, n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo); } extern "C" __global__ void pairWiseTransformStridedHalf( int opNum, Nd4jLong n, float16 *dx, float16 *dy, int incx, int incy, float16 *params, float16 *result, int incz, int *allocationPointer, int *tadOnlyShapeInfo) { pairWiseTransformStridedGeneric<float16>( opNum, n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo); } */ // pwt shape DISPATCH_KERNEL_SIMPLE(pwtSimpleShaped_, pairWiseTransformGeneric, float, INPUT(float *dx, float *dy, float *params, float *result, Nd4jLong *xShapeInfo, int xRank, Nd4jLong *yShapeInfo, int yRank, Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(pwtSimpleShaped_, pairWiseTransformGeneric, double, INPUT(double *dx, double *dy, double *params, double *result, Nd4jLong *xShapeInfo, int xRank, Nd4jLong *yShapeInfo, int yRank, Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(pwtSimpleShaped_, pairWiseTransformGeneric, float16, INPUT(float16 *dx, float16 *dy, float16 *params, float16 *result, Nd4jLong *xShapeInfo, int xRank, 
Nd4jLong *yShapeInfo, int yRank, Nd4jLong *resultShapeInfo, int zRank, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(dx, dy, params, result, xShapeInfo, xRank, yShapeInfo, yRank, resultShapeInfo, zRank, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) // pwt strided DISPATCH_KERNEL_SIMPLE(pwtSimpleStrided_, pairWiseTransformStridedGeneric, float, INPUT(Nd4jLong n, float *dx, float *dy, Nd4jLong incx, Nd4jLong incy, float *params, float *result, Nd4jLong incz, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(pwtSimpleStrided_, pairWiseTransformStridedGeneric, double, INPUT(Nd4jLong n, double *dx, double *dy, Nd4jLong incx, Nd4jLong incy, double *params, double *result, Nd4jLong incz, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DISPATCH_KERNEL_SIMPLE(pwtSimpleStrided_, pairWiseTransformStridedGeneric, float16, INPUT(Nd4jLong n, float16 *dx, float16 *dy, Nd4jLong incx, Nd4jLong incy, float16 *params, float16 *result, Nd4jLong incz, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo), PARAMS(n, dx, dy, incx, incy, params, result, incz, allocationPointer, tadOnlyShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) namespace functions { namespace pairwise_transforms { template<> __host__ void PairWiseTransform<float>::execudaCudaStrided(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *dx, Nd4jLong xStride, float *y, Nd4jLong yStride, float *result, Nd4jLong resultStride, float *extraParams, Nd4jLong n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) { printf("F4 opNum:[%i]; <<<X: [%i]; Y: [%i]; Z: [%i]>>>\n", opNum, launchDims.x,launchDims.y, launchDims.z); } int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformStridedFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>> ( opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleStrided, float, PARAMS(n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<float16>::execudaCudaStrided(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *dx, Nd4jLong xStride, float16 *y, Nd4jLong yStride, float16 *result, Nd4jLong resultStride, float16 *extraParams, Nd4jLong n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformStridedHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>> ( opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleStrided, float16, PARAMS(n, dx, y, xStride, yStride, extraParams, result, 
resultStride, allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<double>::execudaCudaStrided(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *dx, Nd4jLong xStride, double *y, Nd4jLong yStride, double *result, Nd4jLong resultStride, double *extraParams, Nd4jLong n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformStridedDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>> ( opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleStrided, double, PARAMS(n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<float>::execudaCudaShaped(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float *dx, Nd4jLong *xShapeInfo, float *y, Nd4jLong *yShapeInfo, float *result, Nd4jLong *resultShapeInfo, float *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D6 opNum:[%i]\n", opNum); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleShaped, float, PARAMS(dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<float16>::execudaCudaShaped(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, float16 *dx, Nd4jLong *xShapeInfo, float16 *y, Nd4jLong *yShapeInfo, float16 *result, Nd4jLong *resultShapeInfo, float16 *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H6 opNum:[%i]\n", opNum); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, 
shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleShaped, float16, PARAMS(dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } template<> __host__ void PairWiseTransform<double>::execudaCudaShaped(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, double *dx, Nd4jLong *xShapeInfo, double *y, Nd4jLong *yShapeInfo, double *result, Nd4jLong *resultShapeInfo, double *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("H6 opNum:[%i]\n", opNum); auto hostXShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[0]); auto hostYShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[7]); auto hostZShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[8]); auto deviceTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); //pairWiseTransformDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); DISPATCH_SIMPLE(pwtSimpleShaped, double, PARAMS(dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo), OPS_A(PAIRWISE_TRANSFORM_OPS)) DEBUG_KERNEL(stream, opNum); } /* template<typename T> __device__ void PairWiseTransform<T>::transformCuda(const int opNum, Nd4jLong n, T *dx, T *y, int incx, int incy, T *extraParams, T *result, int incz, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM(transformCuda, PARAMS(n, dx, y, incx, incy, extraParams, result, incz, allocationPointer, manager, tadOnlyShapeInfo), PAIRWISE_TRANSFORM_OPS); } template<typename T> __device__ void PairWiseTransform<T>::transformCuda(const int opNum, T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager,int *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM(transformCuda, PARAMS(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, allocationPointer, manager, tadOnlyShapeInfo), PAIRWISE_TRANSFORM_OPS); } */ /* template<typename T> __device__ void PairWiseTransform<T>::transformCuda(const int opNum, T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *resultIndexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM(transform, PARAMS(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, yIndexes, resultIndexes, allocationPointer, manager, tadOnlyShapeInfo), PAIRWISE_TRANSFORM_OPS); } */ /* template<typename T> template<typename OpType> __device__ void PairWiseTransform<T>::transform(T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *resultIndexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; 
Nd4jLong n = shape::length(xShapeBuffer); for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { result[resultIndexes[i]] = OpType::op(dx[indexes[i]],y[yIndexes[i]], extraParams); } } */ /** * */ template<typename T> template<typename OpType> __device__ void PairWiseTransform<T>::transformCuda(T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int xRank; __shared__ int yRank; __shared__ int resultRank; __shared__ int xEWS; __shared__ int yEWS; __shared__ int zEWS; __shared__ char xOrder; __shared__ char yOrder; __shared__ char zOrder; __shared__ bool xRow; __shared__ bool yRow; __shared__ bool zRow; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); yRank = shape::rank(yShapeBuffer); resultRank = shape::rank(resultShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(resultShapeBuffer); xOrder = shape::order(xShapeBuffer); yOrder = shape::order(yShapeBuffer); zOrder = shape::order(resultShapeBuffer); xRow = shape::isRowVector(xShapeBuffer); yRow = shape::isRowVector(yShapeBuffer); zRow = shape::isRowVector(resultShapeBuffer); } __syncthreads(); Nd4jLong n = shape::length(xShapeBuffer); if((xEWS >= 1 && yEWS == xEWS && zEWS == xEWS && xOrder == yOrder && zOrder == xOrder) || (xEWS >= 1 && yEWS == xEWS && zEWS == xEWS && xRow && yRow && zRow)) { // TODO: this is wrong, and should be moved to host side transformCuda<OpType>( n, dx, y, xEWS, yEWS, extraParams, result, zEWS, allocationPointer, manager, tadOnlyShapeInfo); } else { if (dx == result) { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { shape::ind2subC(xRank,shape::shapeOf(xShapeBuffer), i, n, xCoord); shape::ind2subC(yRank,shape::shapeOf(yShapeBuffer), i, n, yCoord); auto xOffset = shape::getOffset(0, shape::shapeOf(xShapeBuffer), shape::stride(xShapeBuffer), xCoord, xRank); auto yOffset = shape::getOffset(0, shape::shapeOf(yShapeBuffer), shape::stride(yShapeBuffer), yCoord, yRank); result[xOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams); } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong resultCoord[MAX_RANK]; for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { shape::ind2subC(xRank,shape::shapeOf(xShapeBuffer), i, n, xCoord); shape::ind2subC(yRank,shape::shapeOf(yShapeBuffer), i, n, yCoord); shape::ind2subC(resultRank,shape::shapeOf(resultShapeBuffer), i, n, resultCoord); auto xOffset = shape::getOffset(0, shape::shapeOf(xShapeBuffer), shape::stride(xShapeBuffer), xCoord, xRank); auto yOffset = shape::getOffset(0, shape::shapeOf(yShapeBuffer), shape::stride(yShapeBuffer), yCoord, yRank); auto resultOffset = shape::getOffset(0, shape::shapeOf(resultShapeBuffer), shape::stride(resultShapeBuffer), resultCoord, resultRank); result[resultOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams); } } } } /* template<typename T> __device__ void transform( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, Nd4jLong n, int *indexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { transform(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, indexes, indexes, allocationPointer, manager, tadOnlyShapeInfo); } 
template<typename T> __device__ void transform( T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { transform(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, yIndexes, indexes, allocationPointer, manager, tadOnlyShapeInfo); } */ /** * * @param n * @param xOffset * @param yOffset * @param resultOffset * @param dx * @param dy * @param incx * @param incy * @param params * @param result * @param incz * @param blockSize */ template<typename T> template<typename OpType> __device__ void PairWiseTransform<T>::transformCuda( Nd4jLong n, T *dx, T *dy, Nd4jLong incx, Nd4jLong incy, T *params, T *result, Nd4jLong incz,int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (incx == incy && incy == incz && incx == 1) { for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { result[i] = OpType::op(dx[i], dy[i], params); } } else { for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { result[i * incz] = OpType::op(dx[i * incx], dy[i * incy], params); } } } } } #endif // CUDA_CC #endif // PAIRWISE_TRANSFORM_CU
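// ---------------------------------------------------------------------------
// Readability note (added; inferred from the host wrappers above, not from any
// official documentation): execudaCudaStrided / execudaCudaShaped expect the
// caller to populate the extraPointers array as follows:
//   extraPointers[0]  - host-side x shape info (Nd4jLong*)
//   extraPointers[1]  - the cudaStream_t to launch on (read in place via
//                       reinterpret_cast<cudaStream_t*>(&extraPointers[1]))
//   extraPointers[3]  - device-side int* scratch ("allocationPointer") passed
//                       through to the kernels
//   extraPointers[7]  - host-side y shape info (Nd4jLong*), shaped variant only
//   extraPointers[8]  - host-side z/result shape info (Nd4jLong*), shaped variant only
//   extraPointers[10] - device-side TAD shape info (Nd4jLong*)
// No other slots are read by the pairwise-transform wrappers in this file.
// ---------------------------------------------------------------------------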
77ceafcff9a1212671b25801daa560f65a604685.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2017, 2019 ETH Zürich, Thomas Schöps // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include "libvis/cuda/patch_match_stereo.cuh" #include <math_constants.h> #include "libvis/cuda/cuda_auto_tuner.h" #include "libvis/cuda/cuda_unprojection_lookup.cuh" #include "libvis/cuda/cuda_util.cuh" #include "libvis/cuda/cuda_util.h" #include "libvis/cuda/patch_match_stereo_cost.cuh" #include "libvis/cuda/patch_match_stereo_util.cuh" namespace vis { __global__ void PatchMatchFilterOutliersCUDAKernel( const StereoParametersSingleCUDA p, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, const CUDAMatrix3x4 reference_tr_stereo, CUDABuffer_<float> inv_depth_map_out, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float> second_best_costs, float second_best_min_cost_factor) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; if (x >= p.context_radius && y >= p.context_radius && x < p.inv_depth_map.width() - p.context_radius && y < p.inv_depth_map.height() - p.context_radius) { if (!(p.costs(y, x) <= cost_threshold) || // includes NaNs !(p.inv_depth_map(y, x) > min_inv_depth)) { inv_depth_map_out(y, x) = kInvalidInvDepth; } else { // If there is another depth value with similar cost, reject the depth // estimate as ambiguous. if (second_best_min_cost_factor > 1) { if (!(second_best_costs(y, x) >= second_best_min_cost_factor * p.costs(y, x))) { // includes NaNs inv_depth_map_out(y, x) = kInvalidInvDepth; return; } } // If at the maximum or minimum depth for this pixel the stereo frame // would not observe that point, discard the pixel (i.e., enforce that // this depth range is observed by both frames). // This is to protect against mistakes that often happen when the frames // overlap in only a small depth range and the actual depth is not within // that range.
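      // Note (comment added): an inverse depth of zero (kInvalidInvDepth) is the
      // sentinel written by every rejection branch below; downstream consumers are
      // presumably expected to treat 1/depth == 0 as "no depth available".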
float2 center_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y); float3 range_min_point = make_float3(required_range_min_depth * center_nxy.x, required_range_min_depth * center_nxy.y, required_range_min_depth); float3 range_max_point = make_float3(required_range_max_depth * center_nxy.x, required_range_max_depth * center_nxy.y, required_range_max_depth); float3 rmin_stereo_point = p.stereo_tr_reference * range_min_point; if (rmin_stereo_point.z <= 0.f) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } const float2 rmin_pxy = p.stereo_camera.Project(rmin_stereo_point); if (rmin_pxy.x < p.context_radius || rmin_pxy.y < p.context_radius || rmin_pxy.x >= p.stereo_camera.width - 1 - p.context_radius || rmin_pxy.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(rmin_pxy.y, rmin_pxy.x) == 0)) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } float3 rmax_stereo_point = p.stereo_tr_reference * range_max_point; if (rmax_stereo_point.z <= 0.f) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } const float2 rmax_pxy = p.stereo_camera.Project(rmax_stereo_point); if (rmax_pxy.x < p.context_radius || rmax_pxy.y < p.context_radius || rmax_pxy.x >= p.stereo_camera.width - 1 - p.context_radius || rmax_pxy.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(rmax_pxy.y, rmax_pxy.x) == 0)) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } // Texture filtering: remove pixels with too small gradients along the epipolar line direction in the patch used for matching. // TODO: The code below is only valid for the current ZNCC implementation, not SSD or Census! float inv_depth = p.inv_depth_map(y, x); const char2 normal_char = p.normals(y, x); float2 normal_xy = make_float2( normal_char.x * (1 / 127.f), normal_char.y * (1 / 127.f)); const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y); const float depth = 1.f / inv_depth; const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z; float total_gradient_magnitude = 0; for (int sample = 0; sample < kNumSamples; ++ sample) { float dx = p.context_radius * kSamplesCUDA[sample][0]; float dy = p.context_radius * kSamplesCUDA[sample][1]; int ix = ::max(0, ::min(static_cast<int>(p.inv_depth_map.width()) - 1, static_cast<int>(x + dx))); int iy = ::max(0, ::min(static_cast<int>(p.inv_depth_map.height()) - 1, static_cast<int>(y + dy))); if (p.mask.address() && p.mask(iy, ix) == 0) { total_gradient_magnitude = -1; break; } float2 nxy = p.reference_unprojection_lookup.UnprojectPoint(x + dx, y + dy); // NOTE: This is only approximate (bilinear interpolation of exact values sampled at pixel centers). 
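        // Derivation (comment added; CalculatePlaneDepth2 is assumed to implement
        // exactly this): the pixel's plane satisfies n . p = plane_d with
        // n = (normal_xy.x, normal_xy.y, normal_z), and a neighboring sample's ray
        // is p = plane_depth * (nxy.x, nxy.y, 1). Intersecting ray and plane gives
        //   plane_depth = plane_d / (nxy.x * normal_xy.x + nxy.y * normal_xy.y + normal_z).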
float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y); float3 original_reference_point = make_float3(nxy.x * plane_depth, nxy.y * plane_depth, plane_depth); float3 original_stereo_point = p.stereo_tr_reference * original_reference_point; constexpr float kShiftZ = 0.01f; float3 shifted_stereo_point = make_float3(original_stereo_point.x, original_stereo_point.y, original_stereo_point.z + kShiftZ); float3 shifted_reference_point = reference_tr_stereo * shifted_stereo_point; const float2 shifted_projection = p.stereo_camera.Project(shifted_reference_point); float2 epipolar_direction = make_float2(shifted_projection.x - 0.5f - (x + dx), shifted_projection.y - 0.5f - (y + dy)); float length = sqrtf(epipolar_direction.x * epipolar_direction.x + epipolar_direction.y * epipolar_direction.y); epipolar_direction = make_float2(epipolar_direction.x / length, epipolar_direction.y / length); // Normalize to length of 1 pixel float reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f, y + dy + 0.5f); float shifted_reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f + epipolar_direction.x, y + dy + 0.5f + epipolar_direction.y); total_gradient_magnitude += fabs(shifted_reference_value - reference_value); } if (total_gradient_magnitude < epipolar_gradient_threshold) { inv_depth_map_out(y, x) = kInvalidInvDepth; return; } // Angle filtering. // Estimate the surface normal from the depth map. float center_depth = 1.f / p.inv_depth_map(y, x); float right_depth = 1.f / p.inv_depth_map(y, x + 1); float left_depth = 1.f / p.inv_depth_map(y, x - 1); float bottom_depth = 1.f / p.inv_depth_map(y + 1, x); float top_depth = 1.f / p.inv_depth_map(y - 1, x); float2 left_nxy = p.reference_unprojection_lookup.UnprojectPoint(x - 1, y); float3 left_point = make_float3(left_depth * left_nxy.x, left_depth * left_nxy.y, left_depth); float2 right_nxy = p.reference_unprojection_lookup.UnprojectPoint(x + 1, y); float3 right_point = make_float3(right_depth * right_nxy.x, right_depth * right_nxy.y, right_depth); float2 top_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y - 1); float3 top_point = make_float3(top_depth * top_nxy.x, top_depth * top_nxy.y, top_depth); float2 bottom_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y + 1); float3 bottom_point = make_float3(bottom_depth * bottom_nxy.x, bottom_depth * bottom_nxy.y, bottom_depth); float3 center_point = make_float3(center_depth * center_nxy.x, center_depth * center_nxy.y, center_depth); constexpr float kRatioThreshold = 2.f; constexpr float kRatioThresholdSquared = kRatioThreshold * kRatioThreshold; float left_dist_squared = SquaredLength(left_point - center_point); float right_dist_squared = SquaredLength(right_point - center_point); float left_right_ratio = left_dist_squared / right_dist_squared; float3 left_to_right; if (left_right_ratio < kRatioThresholdSquared && left_right_ratio > 1.f / kRatioThresholdSquared) { left_to_right = right_point - left_point; } else if (left_dist_squared < right_dist_squared) { left_to_right = center_point - left_point; } else { // left_dist_squared >= right_dist_squared left_to_right = right_point - center_point; } float bottom_dist_squared = SquaredLength(bottom_point - center_point); float top_dist_squared = SquaredLength(top_point - center_point); float bottom_top_ratio = bottom_dist_squared / top_dist_squared; float3 bottom_to_top; if (bottom_top_ratio < kRatioThresholdSquared && bottom_top_ratio > 1.f / kRatioThresholdSquared) { bottom_to_top = 
top_point - bottom_point; } else if (bottom_dist_squared < top_dist_squared) { bottom_to_top = center_point - bottom_point; } else { // bottom_dist_squared >= top_dist_squared bottom_to_top = top_point - center_point; } float3 normal; CrossProduct(left_to_right, bottom_to_top, &normal); // Apply angle threshold. const float normal_length = Norm(normal); const float point_distance = Norm(center_point); const float view_cos_angle = Dot(normal, center_point) / (normal_length * point_distance); if (view_cos_angle > min_cos_angle) { inv_depth_map_out(y, x) = kInvalidInvDepth; } else { inv_depth_map_out(y, x) = p.inv_depth_map(y, x); } } } else if (x < p.inv_depth_map.width() && y < p.inv_depth_map.height()) { inv_depth_map_out(y, x) = kInvalidInvDepth; } } void PatchMatchFilterOutliersCUDA( const StereoParametersSingle& p, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, const CUDAMatrix3x4& reference_tr_stereo, CUDABuffer_<float>* inv_depth_map_out, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float>* second_best_costs, float second_best_min_cost_factor) { CHECK_CUDA_NO_ERROR(); CUDA_AUTO_TUNE_2D( PatchMatchFilterOutliersCUDAKernel, 16, 16, p.inv_depth_map.width(), p.inv_depth_map.height(), 0, p.stream, /* kernel parameters */ StereoParametersSingleCUDA(p), min_inv_depth, required_range_min_depth, required_range_max_depth, reference_tr_stereo, *inv_depth_map_out, cost_threshold, epipolar_gradient_threshold, min_cos_angle, *second_best_costs, second_best_min_cost_factor); CHECK_CUDA_NO_ERROR(); } template <bool kDebugFilterReasons> __global__ void PatchMatchFilterOutliersCUDAKernel( const StereoParametersMultiCUDA p, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, const CUDAMatrix3x4* reference_tr_stereo, CUDABuffer_<float> inv_depth_map_out, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float> second_best_costs, float second_best_min_cost_factor, CUDABuffer_<uchar3> filter_reasons) { // List of filter reasons with debug color: // dark red (127, 0, 0): The depth exceeds the maximum depth // dark green (0, 127, 0): The required depth range is not visible in any stereo image // red (255, 0, 0): The gradients in epipolar line directions are too small for all stereo images // (note: this only uses image-bounds visibility checking in the stereo images, // so it may incorrectly take images into account where the point is occluded) // dark yellow (140, 140, 0): Angle check failed // gray (127, 127, 127): Pixel is too close to the image borders (closer than context radius) // blue (0, 0, 255): Consistency check failed. // green (0, 255, 0): Connected component too small. // black (0, 0, 0): The pixel passed the filters. unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; if (x >= p.context_radius && y >= p.context_radius && x < p.inv_depth_map.width() - p.context_radius && y < p.inv_depth_map.height() - p.context_radius) { if (!(p.inv_depth_map(y, x) > min_inv_depth)) { // includes NaNs if (kDebugFilterReasons) { if (p.inv_depth_map(y, x) != kInvalidInvDepth) { filter_reasons(y, x) = make_uchar3(127, 0, 0); } } inv_depth_map_out(y, x) = kInvalidInvDepth; } else { // If there is another depth value with similar cost, reject the depth // estimate as ambiguous. 
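        // Note (comment added): unlike the single-image kernel above, this
        // multi-image variant keeps the second-best-cost ambiguity test only as
        // the commented-out block below; second_best_costs and
        // second_best_min_cost_factor are accepted as parameters but are otherwise
        // unused in this kernel.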
// if (second_best_min_cost_factor > 1) { // if (!(second_best_costs(y, x) >= second_best_min_cost_factor * costs(y, x))) { // includes NaNs // inv_depth_map_out(y, x) = kInvalidInvDepth; // return; // } // } // If at the maximum or minimum depth for this pixel the stereo frame // would not observe that point, discard the pixel (i.e., enforce that // this depth range is observed by both frames). // This is to protect against mistakes that often happen when the frames // overlap in only a small depth range and the actual depth is not within // that range. float2 center_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y); float3 range_min_point = make_float3(required_range_min_depth * center_nxy.x, required_range_min_depth * center_nxy.y, required_range_min_depth); float3 range_max_point = make_float3(required_range_max_depth * center_nxy.x, required_range_max_depth * center_nxy.y, required_range_max_depth); bool valid = false; for (int s = 0; s < p.num_stereo_images; ++ s) { float3 rmin_stereo_point = p.stereo_tr_reference[s] * range_min_point; if (rmin_stereo_point.z <= 0.f) { continue; } const float2 rmin_pxy = p.stereo_camera.Project(rmin_stereo_point); if (rmin_pxy.x < p.context_radius || rmin_pxy.y < p.context_radius || rmin_pxy.x >= p.stereo_camera.width - 1 - p.context_radius || rmin_pxy.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(rmin_pxy.y, rmin_pxy.x) == 0)) { continue; } float3 rmax_stereo_point = p.stereo_tr_reference[s] * range_max_point; if (rmax_stereo_point.z <= 0.f) { continue; } const float2 rmax_pxy = p.stereo_camera.Project(rmax_stereo_point); if (rmax_pxy.x < p.context_radius || rmax_pxy.y < p.context_radius || rmax_pxy.x >= p.stereo_camera.width - 1 - p.context_radius || rmax_pxy.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(rmax_pxy.y, rmax_pxy.x) == 0)) { continue; } valid = true; break; } if (!valid) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(0, 127, 0); } return; } // TODO: Texture filtering is currently not implemented for the multi-image case // Texture filtering: remove pixels with too small gradients along the epipolar line direction in the patch used for matching. // TODO: The code below is only valid for the current ZNCC implementation, not SSD or Census! 
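        // Outline of the texture filter below (comment added; note that the TODO
        // above saying it is "not implemented for the multi-image case" appears
        // stale, since the loop below does iterate over all stereo images): each
        // of the kNumSamples patch samples is lifted onto the pixel's plane,
        // transformed into stereo image s, nudged by kShiftZ along the stereo
        // camera's z axis and mapped back, which yields the local epipolar
        // direction at that sample in the reference image. The absolute gradient
        // of the reference texture along that direction is accumulated; if no
        // stereo image yields a total of at least epipolar_gradient_threshold,
        // the pixel is considered too texture-poor to match reliably and is
        // rejected (debug color red).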
float inv_depth = p.inv_depth_map(y, x); const char2 normal_char = p.normals(y, x); float2 normal_xy = make_float2( normal_char.x * (1 / 127.f), normal_char.y * (1 / 127.f)); const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y); const float depth = 1.f / inv_depth; const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z; valid = false; for (int s = 0; s < p.num_stereo_images; ++ s) { float total_gradient_magnitude = 0; for (int sample = 0; sample < kNumSamples; ++ sample) { float dx = p.context_radius * kSamplesCUDA[sample][0]; float dy = p.context_radius * kSamplesCUDA[sample][1]; if (s == 0) { int ix = ::max(0, ::min(static_cast<int>(p.inv_depth_map.width() - 1), static_cast<int>(x + dx))); int iy = ::max(0, ::min(static_cast<int>(p.inv_depth_map.height() - 1), static_cast<int>(y + dy))); if (p.mask.address() && p.mask(iy, ix) == 0) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(127, 127, 127); } return; } } float2 nxy = p.reference_unprojection_lookup.UnprojectPoint(x + dx, y + dy); // NOTE: This is only approximate (bilinear interpolation of exact values sampled at pixel centers). float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y); float3 original_reference_point = make_float3(nxy.x * plane_depth, nxy.y * plane_depth, plane_depth); float3 original_stereo_point = p.stereo_tr_reference[s] * original_reference_point; if (original_stereo_point.z <= 0) { continue; } const float2 stereo_projection = p.stereo_camera.Project(original_stereo_point); if (stereo_projection.x < p.context_radius || stereo_projection.y < p.context_radius || stereo_projection.x >= p.stereo_camera.width - 1 - p.context_radius || stereo_projection.y >= p.stereo_camera.height - 1 - p.context_radius || (p.mask.address() && p.mask(stereo_projection.y, stereo_projection.x) == 0)) { continue; } constexpr float kShiftZ = 0.01f; float3 shifted_stereo_point = make_float3(original_stereo_point.x, original_stereo_point.y, original_stereo_point.z + kShiftZ); float3 shifted_reference_point = reference_tr_stereo[s] * shifted_stereo_point; const float2 shifted_projection = p.stereo_camera.Project(shifted_reference_point); float2 epipolar_direction = make_float2(shifted_projection.x - 0.5f - (x + dx), shifted_projection.y - 0.5f - (y + dy)); float length = sqrtf(epipolar_direction.x * epipolar_direction.x + epipolar_direction.y * epipolar_direction.y); epipolar_direction = make_float2(epipolar_direction.x / length, epipolar_direction.y / length); // Normalize to length of 1 pixel float reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f, y + dy + 0.5f); float shifted_reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f + epipolar_direction.x, y + dy + 0.5f + epipolar_direction.y); total_gradient_magnitude += fabs(shifted_reference_value - reference_value); } if (total_gradient_magnitude >= epipolar_gradient_threshold) { valid = true; break; } } if (!valid) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(255, 0, 0); } return; } // Angle filtering. // Estimate the surface normal from the depth map. 
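        // Comment added: the surface normal is estimated from differences of the
        // unprojected depth map. The kRatioThreshold logic below uses a central
        // difference when the left/right (or bottom/top) neighbors lie at
        // comparable distances and falls back to a one-sided difference toward
        // the nearer neighbor otherwise, presumably so that depth discontinuities
        // do not corrupt the estimated normal.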
float center_depth = 1.f / p.inv_depth_map(y, x); float right_depth = 1.f / p.inv_depth_map(y, x + 1); float left_depth = 1.f / p.inv_depth_map(y, x - 1); float bottom_depth = 1.f / p.inv_depth_map(y + 1, x); float top_depth = 1.f / p.inv_depth_map(y - 1, x); float2 left_nxy = p.reference_unprojection_lookup.UnprojectPoint(x - 1, y); float3 left_point = make_float3(left_depth * left_nxy.x, left_depth * left_nxy.y, left_depth); float2 right_nxy = p.reference_unprojection_lookup.UnprojectPoint(x + 1, y); float3 right_point = make_float3(right_depth * right_nxy.x, right_depth * right_nxy.y, right_depth); float2 top_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y - 1); float3 top_point = make_float3(top_depth * top_nxy.x, top_depth * top_nxy.y, top_depth); float2 bottom_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y + 1); float3 bottom_point = make_float3(bottom_depth * bottom_nxy.x, bottom_depth * bottom_nxy.y, bottom_depth); float3 center_point = make_float3(center_depth * center_nxy.x, center_depth * center_nxy.y, center_depth); constexpr float kRatioThreshold = 2.f; constexpr float kRatioThresholdSquared = kRatioThreshold * kRatioThreshold; float left_dist_squared = SquaredLength(left_point - center_point); float right_dist_squared = SquaredLength(right_point - center_point); float left_right_ratio = left_dist_squared / right_dist_squared; float3 left_to_right; if (left_right_ratio < kRatioThresholdSquared && left_right_ratio > 1.f / kRatioThresholdSquared) { left_to_right = right_point - left_point; } else if (left_dist_squared < right_dist_squared) { left_to_right = center_point - left_point; } else { // left_dist_squared >= right_dist_squared left_to_right = right_point - center_point; } float bottom_dist_squared = SquaredLength(bottom_point - center_point); float top_dist_squared = SquaredLength(top_point - center_point); float bottom_top_ratio = bottom_dist_squared / top_dist_squared; float3 bottom_to_top; if (bottom_top_ratio < kRatioThresholdSquared && bottom_top_ratio > 1.f / kRatioThresholdSquared) { bottom_to_top = top_point - bottom_point; } else if (bottom_dist_squared < top_dist_squared) { bottom_to_top = center_point - bottom_point; } else { // bottom_dist_squared >= top_dist_squared bottom_to_top = top_point - center_point; } float3 normal; CrossProduct(left_to_right, bottom_to_top, &normal); // Apply angle threshold. 
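        // Comment added: view_cos_angle below is the cosine of the angle between
        // the (unnormalized) estimated surface normal and the viewing ray to the
        // point. With the cross-product orientation used here it is near -1 for
        // surfaces seen head-on and approaches 0 at grazing angles, so rejecting
        // view_cos_angle > min_cos_angle (with min_cos_angle presumably set to a
        // negative cosine of the maximum allowed observation angle) discards
        // points observed too obliquely.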
const float normal_length = Norm(normal); const float point_distance = Norm(center_point); const float view_cos_angle = Dot(normal, center_point) / (normal_length * point_distance); if (view_cos_angle > min_cos_angle) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(140, 140, 0); } } else { inv_depth_map_out(y, x) = p.inv_depth_map(y, x); if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(0, 0, 0); } } } } else if (x < p.inv_depth_map.width() && y < p.inv_depth_map.height()) { inv_depth_map_out(y, x) = kInvalidInvDepth; if (kDebugFilterReasons) { filter_reasons(y, x) = make_uchar3(127, 127, 127); } } } void PatchMatchFilterOutliersCUDA( const StereoParametersMulti& p, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, const CUDAMatrix3x4* reference_tr_stereo, CUDABuffer_<float>* inv_depth_map_out, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float>* second_best_costs, float second_best_min_cost_factor, CUDABuffer_<uchar3>* filter_reasons) { CHECK_CUDA_NO_ERROR(); bool have_filter_reasons = filter_reasons != nullptr; COMPILE_OPTION(have_filter_reasons, CUDA_AUTO_TUNE_2D_TEMPLATED( PatchMatchFilterOutliersCUDAKernel, 16, 16, p.inv_depth_map.width(), p.inv_depth_map.height(), 0, p.stream, TEMPLATE_ARGUMENTS(_have_filter_reasons), /* kernel parameters */ StereoParametersMultiCUDA(p), min_inv_depth, required_range_min_depth, required_range_max_depth, reference_tr_stereo, *inv_depth_map_out, cost_threshold, epipolar_gradient_threshold, min_cos_angle, *second_best_costs, second_best_min_cost_factor, filter_reasons ? *filter_reasons : CUDABuffer_<uchar3>())); CHECK_CUDA_NO_ERROR(); } }
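// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of the original libvis sources): the
// kDebugFilterReasons template parameter above is a compile-time switch, so the
// debug bookkeeping costs nothing when it is disabled. The minimal, hypothetical
// kernel below shows the same pattern in isolation.
// ---------------------------------------------------------------------------
#if defined(__CUDACC__) || defined(__HIPCC__)
template <bool kDebug>
__global__ void ExampleCompileTimeDebugKernel(const float* in, float* out,
                                              unsigned char* debug_flags, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  bool rejected = !(in[i] > 0.f);       // also catches NaNs, like the filters above
  out[i] = rejected ? 0.f : in[i];
  if (kDebug) {                         // kDebug is a compile-time constant, so this
    debug_flags[i] = rejected ? 1 : 0;  // branch compiles away when kDebug == false
  }
}
// A caller selects the instantiation at runtime, analogous to what the
// COMPILE_OPTION / CUDA_AUTO_TUNE_2D_TEMPLATED macros do above, e.g.:
//   if (want_debug) ExampleCompileTimeDebugKernel<true ><<<blocks, threads, 0, stream>>>(in, out, flags, n);
//   else            ExampleCompileTimeDebugKernel<false><<<blocks, threads, 0, stream>>>(in, out, flags, n);
#endif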
77ceafcff9a1212671b25801daa560f65a604685.cu
// Copyright 2017, 2019 ETH Zürich, Thomas Schöps // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include "libvis/cuda/patch_match_stereo.cuh" #include <math_constants.h> #include "libvis/cuda/cuda_auto_tuner.h" #include "libvis/cuda/cuda_unprojection_lookup.cuh" #include "libvis/cuda/cuda_util.cuh" #include "libvis/cuda/cuda_util.h" #include "libvis/cuda/patch_match_stereo_cost.cuh" #include "libvis/cuda/patch_match_stereo_util.cuh" namespace vis { __global__ void PatchMatchFilterOutliersCUDAKernel( const StereoParametersSingleCUDA p, float min_inv_depth, float required_range_min_depth, float required_range_max_depth, const CUDAMatrix3x4 reference_tr_stereo, CUDABuffer_<float> inv_depth_map_out, float cost_threshold, float epipolar_gradient_threshold, float min_cos_angle, CUDABuffer_<float> second_best_costs, float second_best_min_cost_factor) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; const float kInvalidInvDepth = 0; if (x >= p.context_radius && y >= p.context_radius && x < p.inv_depth_map.width() - p.context_radius && y < p.inv_depth_map.height() - p.context_radius) { if (!(p.costs(y, x) <= cost_threshold) || // includes NaNs !(p.inv_depth_map(y, x) > min_inv_depth)) { inv_depth_map_out(y, x) = kInvalidInvDepth; } else { // If there is another depth value with similar cost, reject the depth // estimate as ambiguous. if (second_best_min_cost_factor > 1) { if (!(second_best_costs(y, x) >= second_best_min_cost_factor * p.costs(y, x))) { // includes NaNs inv_depth_map_out(y, x) = kInvalidInvDepth; return; } } // If at the maximum or minimum depth for this pixel the stereo frame // would not observe that point, discard the pixel (i.e., enforce that // this depth range is observed by both frames). // This is to protect against mistakes that often happen when the frames // overlap in only a small depth range and the actual depth is not within // that range. 
      float2 center_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y);
      
      float3 range_min_point = make_float3(required_range_min_depth * center_nxy.x, required_range_min_depth * center_nxy.y, required_range_min_depth);
      float3 range_max_point = make_float3(required_range_max_depth * center_nxy.x, required_range_max_depth * center_nxy.y, required_range_max_depth);
      
      float3 rmin_stereo_point = p.stereo_tr_reference * range_min_point;
      if (rmin_stereo_point.z <= 0.f) {
        inv_depth_map_out(y, x) = kInvalidInvDepth;
        return;
      }
      const float2 rmin_pxy = p.stereo_camera.Project(rmin_stereo_point);
      if (rmin_pxy.x < p.context_radius ||
          rmin_pxy.y < p.context_radius ||
          rmin_pxy.x >= p.stereo_camera.width - 1 - p.context_radius ||
          rmin_pxy.y >= p.stereo_camera.height - 1 - p.context_radius ||
          (p.mask.address() && p.mask(rmin_pxy.y, rmin_pxy.x) == 0)) {
        inv_depth_map_out(y, x) = kInvalidInvDepth;
        return;
      }
      
      float3 rmax_stereo_point = p.stereo_tr_reference * range_max_point;
      if (rmax_stereo_point.z <= 0.f) {
        inv_depth_map_out(y, x) = kInvalidInvDepth;
        return;
      }
      const float2 rmax_pxy = p.stereo_camera.Project(rmax_stereo_point);
      if (rmax_pxy.x < p.context_radius ||
          rmax_pxy.y < p.context_radius ||
          rmax_pxy.x >= p.stereo_camera.width - 1 - p.context_radius ||
          rmax_pxy.y >= p.stereo_camera.height - 1 - p.context_radius ||
          (p.mask.address() && p.mask(rmax_pxy.y, rmax_pxy.x) == 0)) {
        inv_depth_map_out(y, x) = kInvalidInvDepth;
        return;
      }
      
      // Texture filtering: remove pixels with too small gradients along the epipolar line direction in the patch used for matching.
      // TODO: The code below is only valid for the current ZNCC implementation, not SSD or Census!
      float inv_depth = p.inv_depth_map(y, x);
      const char2 normal_char = p.normals(y, x);
      float2 normal_xy = make_float2(
          normal_char.x * (1 / 127.f), normal_char.y * (1 / 127.f));
      const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y);
      const float depth = 1.f / inv_depth;
      const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z;
      
      float total_gradient_magnitude = 0;
      
      for (int sample = 0; sample < kNumSamples; ++ sample) {
        float dx = p.context_radius * kSamplesCUDA[sample][0];
        float dy = p.context_radius * kSamplesCUDA[sample][1];
        
        int ix = ::max(0, ::min(static_cast<int>(p.inv_depth_map.width()) - 1, static_cast<int>(x + dx)));
        int iy = ::max(0, ::min(static_cast<int>(p.inv_depth_map.height()) - 1, static_cast<int>(y + dy)));
        if (p.mask.address() && p.mask(iy, ix) == 0) {
          total_gradient_magnitude = -1;
          break;
        }
        
        float2 nxy = p.reference_unprojection_lookup.UnprojectPoint(x + dx, y + dy);  // NOTE: This is only approximate (bilinear interpolation of exact values sampled at pixel centers).
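        // The sample is placed onto the current plane hypothesis and transformed into
        // the stereo frame; shifting that point in depth and re-projecting it
        // approximates the epipolar line direction at this sample, along which the
        // reference-image gradient is accumulated below.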
        float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y);
        
        float3 original_reference_point = make_float3(nxy.x * plane_depth, nxy.y * plane_depth, plane_depth);
        float3 original_stereo_point = p.stereo_tr_reference * original_reference_point;
        
        constexpr float kShiftZ = 0.01f;
        float3 shifted_stereo_point = make_float3(original_stereo_point.x, original_stereo_point.y, original_stereo_point.z + kShiftZ);
        float3 shifted_reference_point = reference_tr_stereo * shifted_stereo_point;
        
        const float2 shifted_projection = p.stereo_camera.Project(shifted_reference_point);
        float2 epipolar_direction = make_float2(shifted_projection.x - 0.5f - (x + dx),
                                                shifted_projection.y - 0.5f - (y + dy));
        float length = sqrtf(epipolar_direction.x * epipolar_direction.x + epipolar_direction.y * epipolar_direction.y);
        epipolar_direction = make_float2(epipolar_direction.x / length, epipolar_direction.y / length);  // Normalize to length of 1 pixel
        
        float reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f, y + dy + 0.5f);
        float shifted_reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f + epipolar_direction.x, y + dy + 0.5f + epipolar_direction.y);
        
        total_gradient_magnitude += fabs(shifted_reference_value - reference_value);
      }
      
      if (total_gradient_magnitude < epipolar_gradient_threshold) {
        inv_depth_map_out(y, x) = kInvalidInvDepth;
        return;
      }
      
      // Angle filtering.
      // Estimate the surface normal from the depth map.
      float center_depth = 1.f / p.inv_depth_map(y, x);
      float right_depth = 1.f / p.inv_depth_map(y, x + 1);
      float left_depth = 1.f / p.inv_depth_map(y, x - 1);
      float bottom_depth = 1.f / p.inv_depth_map(y + 1, x);
      float top_depth = 1.f / p.inv_depth_map(y - 1, x);
      
      float2 left_nxy = p.reference_unprojection_lookup.UnprojectPoint(x - 1, y);
      float3 left_point = make_float3(left_depth * left_nxy.x, left_depth * left_nxy.y, left_depth);
      float2 right_nxy = p.reference_unprojection_lookup.UnprojectPoint(x + 1, y);
      float3 right_point = make_float3(right_depth * right_nxy.x, right_depth * right_nxy.y, right_depth);
      float2 top_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y - 1);
      float3 top_point = make_float3(top_depth * top_nxy.x, top_depth * top_nxy.y, top_depth);
      float2 bottom_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y + 1);
      float3 bottom_point = make_float3(bottom_depth * bottom_nxy.x, bottom_depth * bottom_nxy.y, bottom_depth);
      float3 center_point = make_float3(center_depth * center_nxy.x, center_depth * center_nxy.y, center_depth);
      
      constexpr float kRatioThreshold = 2.f;
      constexpr float kRatioThresholdSquared = kRatioThreshold * kRatioThreshold;
      
      float left_dist_squared = SquaredLength(left_point - center_point);
      float right_dist_squared = SquaredLength(right_point - center_point);
      float left_right_ratio = left_dist_squared / right_dist_squared;
      float3 left_to_right;
      if (left_right_ratio < kRatioThresholdSquared &&
          left_right_ratio > 1.f / kRatioThresholdSquared) {
        left_to_right = right_point - left_point;
      } else if (left_dist_squared < right_dist_squared) {
        left_to_right = center_point - left_point;
      } else {  // left_dist_squared >= right_dist_squared
        left_to_right = right_point - center_point;
      }
      
      float bottom_dist_squared = SquaredLength(bottom_point - center_point);
      float top_dist_squared = SquaredLength(top_point - center_point);
      float bottom_top_ratio = bottom_dist_squared / top_dist_squared;
      float3 bottom_to_top;
      if (bottom_top_ratio < kRatioThresholdSquared &&
          bottom_top_ratio > 1.f / kRatioThresholdSquared) {
        bottom_to_top = top_point - bottom_point;
      } else if (bottom_dist_squared < top_dist_squared) {
        bottom_to_top = center_point - bottom_point;
      } else {  // bottom_dist_squared >= top_dist_squared
        bottom_to_top = top_point - center_point;
      }
      
      float3 normal;
      CrossProduct(left_to_right, bottom_to_top, &normal);
      
      // Apply angle threshold.
      const float normal_length = Norm(normal);
      const float point_distance = Norm(center_point);
      const float view_cos_angle = Dot(normal, center_point) / (normal_length * point_distance);
      if (view_cos_angle > min_cos_angle) {
        inv_depth_map_out(y, x) = kInvalidInvDepth;
      } else {
        inv_depth_map_out(y, x) = p.inv_depth_map(y, x);
      }
    }
  } else if (x < p.inv_depth_map.width() && y < p.inv_depth_map.height()) {
    inv_depth_map_out(y, x) = kInvalidInvDepth;
  }
}

void PatchMatchFilterOutliersCUDA(
    const StereoParametersSingle& p,
    float min_inv_depth,
    float required_range_min_depth,
    float required_range_max_depth,
    const CUDAMatrix3x4& reference_tr_stereo,
    CUDABuffer_<float>* inv_depth_map_out,
    float cost_threshold,
    float epipolar_gradient_threshold,
    float min_cos_angle,
    CUDABuffer_<float>* second_best_costs,
    float second_best_min_cost_factor) {
  CHECK_CUDA_NO_ERROR();
  
  CUDA_AUTO_TUNE_2D(
      PatchMatchFilterOutliersCUDAKernel,
      16, 16,
      p.inv_depth_map.width(), p.inv_depth_map.height(),
      0, p.stream,
      /* kernel parameters */
      StereoParametersSingleCUDA(p),
      min_inv_depth,
      required_range_min_depth,
      required_range_max_depth,
      reference_tr_stereo,
      *inv_depth_map_out,
      cost_threshold,
      epipolar_gradient_threshold,
      min_cos_angle,
      *second_best_costs,
      second_best_min_cost_factor);
  CHECK_CUDA_NO_ERROR();
}


template <bool kDebugFilterReasons>
__global__ void PatchMatchFilterOutliersCUDAKernel(
    const StereoParametersMultiCUDA p,
    float min_inv_depth,
    float required_range_min_depth,
    float required_range_max_depth,
    const CUDAMatrix3x4* reference_tr_stereo,
    CUDABuffer_<float> inv_depth_map_out,
    float cost_threshold,
    float epipolar_gradient_threshold,
    float min_cos_angle,
    CUDABuffer_<float> second_best_costs,
    float second_best_min_cost_factor,
    CUDABuffer_<uchar3> filter_reasons) {
  // List of filter reasons with debug color:
  // dark red (127, 0, 0): The depth exceeds the maximum depth
  // dark green (0, 127, 0): The required depth range is not visible in any stereo image
  // red (255, 0, 0): The gradients in epipolar line directions are too small for all stereo images
  //                  (note: this only uses image-bounds visibility checking in the stereo images,
  //                  so it may incorrectly take images into account where the point is occluded)
  // dark yellow (140, 140, 0): Angle check failed
  // gray (127, 127, 127): Pixel is too close to the image borders (closer than context radius)
  // blue (0, 0, 255): Consistency check failed.
  // green (0, 255, 0): Connected component too small.
  // black (0, 0, 0): The pixel passed the filters.
  
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
  
  const float kInvalidInvDepth = 0;
  
  if (x >= p.context_radius &&
      y >= p.context_radius &&
      x < p.inv_depth_map.width() - p.context_radius &&
      y < p.inv_depth_map.height() - p.context_radius) {
    if (!(p.inv_depth_map(y, x) > min_inv_depth)) {  // includes NaNs
      if (kDebugFilterReasons) {
        if (p.inv_depth_map(y, x) != kInvalidInvDepth) {
          filter_reasons(y, x) = make_uchar3(127, 0, 0);
        }
      }
      inv_depth_map_out(y, x) = kInvalidInvDepth;
    } else {
      // If there is another depth value with similar cost, reject the depth
      // estimate as ambiguous.
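      // Note: in this multi-view variant the ambiguity check below is commented out,
      // so second_best_costs and second_best_min_cost_factor are currently unused here.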
      // if (second_best_min_cost_factor > 1) {
      //   if (!(second_best_costs(y, x) >= second_best_min_cost_factor * costs(y, x))) {  // includes NaNs
      //     inv_depth_map_out(y, x) = kInvalidInvDepth;
      //     return;
      //   }
      // }
      
      // If at the maximum or minimum depth for this pixel the stereo frame
      // would not observe that point, discard the pixel (i.e., enforce that
      // this depth range is observed by both frames).
      // This is to protect against mistakes that often happen when the frames
      // overlap in only a small depth range and the actual depth is not within
      // that range.
      float2 center_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y);
      
      float3 range_min_point = make_float3(required_range_min_depth * center_nxy.x, required_range_min_depth * center_nxy.y, required_range_min_depth);
      float3 range_max_point = make_float3(required_range_max_depth * center_nxy.x, required_range_max_depth * center_nxy.y, required_range_max_depth);
      
      bool valid = false;
      for (int s = 0; s < p.num_stereo_images; ++ s) {
        float3 rmin_stereo_point = p.stereo_tr_reference[s] * range_min_point;
        if (rmin_stereo_point.z <= 0.f) {
          continue;
        }
        const float2 rmin_pxy = p.stereo_camera.Project(rmin_stereo_point);
        if (rmin_pxy.x < p.context_radius ||
            rmin_pxy.y < p.context_radius ||
            rmin_pxy.x >= p.stereo_camera.width - 1 - p.context_radius ||
            rmin_pxy.y >= p.stereo_camera.height - 1 - p.context_radius ||
            (p.mask.address() && p.mask(rmin_pxy.y, rmin_pxy.x) == 0)) {
          continue;
        }
        
        float3 rmax_stereo_point = p.stereo_tr_reference[s] * range_max_point;
        if (rmax_stereo_point.z <= 0.f) {
          continue;
        }
        const float2 rmax_pxy = p.stereo_camera.Project(rmax_stereo_point);
        if (rmax_pxy.x < p.context_radius ||
            rmax_pxy.y < p.context_radius ||
            rmax_pxy.x >= p.stereo_camera.width - 1 - p.context_radius ||
            rmax_pxy.y >= p.stereo_camera.height - 1 - p.context_radius ||
            (p.mask.address() && p.mask(rmax_pxy.y, rmax_pxy.x) == 0)) {
          continue;
        }
        
        valid = true;
        break;
      }
      if (!valid) {
        inv_depth_map_out(y, x) = kInvalidInvDepth;
        if (kDebugFilterReasons) {
          filter_reasons(y, x) = make_uchar3(0, 127, 0);
        }
        return;
      }
      
      // TODO: Texture filtering is currently not implemented for the multi-image case
      // Texture filtering: remove pixels with too small gradients along the epipolar line direction in the patch used for matching.
      // TODO: The code below is only valid for the current ZNCC implementation, not SSD or Census!
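      // Note: despite the TODO above, the loop below does evaluate the epipolar
      // gradient per stereo image; the pixel is kept as soon as one image provides
      // a gradient sum of at least epipolar_gradient_threshold.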
      float inv_depth = p.inv_depth_map(y, x);
      const char2 normal_char = p.normals(y, x);
      float2 normal_xy = make_float2(
          normal_char.x * (1 / 127.f), normal_char.y * (1 / 127.f));
      const float normal_z = -sqrtf(1.f - normal_xy.x * normal_xy.x - normal_xy.y * normal_xy.y);
      const float depth = 1.f / inv_depth;
      const float plane_d = (center_nxy.x * depth) * normal_xy.x + (center_nxy.y * depth) * normal_xy.y + depth * normal_z;
      
      valid = false;
      for (int s = 0; s < p.num_stereo_images; ++ s) {
        float total_gradient_magnitude = 0;
        
        for (int sample = 0; sample < kNumSamples; ++ sample) {
          float dx = p.context_radius * kSamplesCUDA[sample][0];
          float dy = p.context_radius * kSamplesCUDA[sample][1];
          
          if (s == 0) {
            int ix = ::max(0, ::min(static_cast<int>(p.inv_depth_map.width() - 1), static_cast<int>(x + dx)));
            int iy = ::max(0, ::min(static_cast<int>(p.inv_depth_map.height() - 1), static_cast<int>(y + dy)));
            if (p.mask.address() && p.mask(iy, ix) == 0) {
              inv_depth_map_out(y, x) = kInvalidInvDepth;
              if (kDebugFilterReasons) {
                filter_reasons(y, x) = make_uchar3(127, 127, 127);
              }
              return;
            }
          }
          
          float2 nxy = p.reference_unprojection_lookup.UnprojectPoint(x + dx, y + dy);  // NOTE: This is only approximate (bilinear interpolation of exact values sampled at pixel centers).
          float plane_depth = CalculatePlaneDepth2(plane_d, normal_xy, normal_z, nxy.x, nxy.y);
          
          float3 original_reference_point = make_float3(nxy.x * plane_depth, nxy.y * plane_depth, plane_depth);
          float3 original_stereo_point = p.stereo_tr_reference[s] * original_reference_point;
          if (original_stereo_point.z <= 0) {
            continue;
          }
          const float2 stereo_projection = p.stereo_camera.Project(original_stereo_point);
          if (stereo_projection.x < p.context_radius ||
              stereo_projection.y < p.context_radius ||
              stereo_projection.x >= p.stereo_camera.width - 1 - p.context_radius ||
              stereo_projection.y >= p.stereo_camera.height - 1 - p.context_radius ||
              (p.mask.address() && p.mask(stereo_projection.y, stereo_projection.x) == 0)) {
            continue;
          }
          
          constexpr float kShiftZ = 0.01f;
          float3 shifted_stereo_point = make_float3(original_stereo_point.x, original_stereo_point.y, original_stereo_point.z + kShiftZ);
          float3 shifted_reference_point = reference_tr_stereo[s] * shifted_stereo_point;
          
          const float2 shifted_projection = p.stereo_camera.Project(shifted_reference_point);
          float2 epipolar_direction = make_float2(shifted_projection.x - 0.5f - (x + dx),
                                                  shifted_projection.y - 0.5f - (y + dy));
          float length = sqrtf(epipolar_direction.x * epipolar_direction.x + epipolar_direction.y * epipolar_direction.y);
          epipolar_direction = make_float2(epipolar_direction.x / length, epipolar_direction.y / length);  // Normalize to length of 1 pixel
          
          float reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f, y + dy + 0.5f);
          float shifted_reference_value = 255.f * tex2D<float>(p.reference_texture, x + dx + 0.5f + epipolar_direction.x, y + dy + 0.5f + epipolar_direction.y);
          
          total_gradient_magnitude += fabs(shifted_reference_value - reference_value);
        }
        
        if (total_gradient_magnitude >= epipolar_gradient_threshold) {
          valid = true;
          break;
        }
      }
      if (!valid) {
        inv_depth_map_out(y, x) = kInvalidInvDepth;
        if (kDebugFilterReasons) {
          filter_reasons(y, x) = make_uchar3(255, 0, 0);
        }
        return;
      }
      
      // Angle filtering.
      // Estimate the surface normal from the depth map.
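      // The normal is estimated from the unprojected 4-neighborhood via the cross
      // product of a horizontal and a vertical difference vector; the ratio tests
      // below switch to one-sided differences where the two-sided difference would
      // likely span a depth discontinuity.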
      float center_depth = 1.f / p.inv_depth_map(y, x);
      float right_depth = 1.f / p.inv_depth_map(y, x + 1);
      float left_depth = 1.f / p.inv_depth_map(y, x - 1);
      float bottom_depth = 1.f / p.inv_depth_map(y + 1, x);
      float top_depth = 1.f / p.inv_depth_map(y - 1, x);
      
      float2 left_nxy = p.reference_unprojection_lookup.UnprojectPoint(x - 1, y);
      float3 left_point = make_float3(left_depth * left_nxy.x, left_depth * left_nxy.y, left_depth);
      float2 right_nxy = p.reference_unprojection_lookup.UnprojectPoint(x + 1, y);
      float3 right_point = make_float3(right_depth * right_nxy.x, right_depth * right_nxy.y, right_depth);
      float2 top_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y - 1);
      float3 top_point = make_float3(top_depth * top_nxy.x, top_depth * top_nxy.y, top_depth);
      float2 bottom_nxy = p.reference_unprojection_lookup.UnprojectPoint(x, y + 1);
      float3 bottom_point = make_float3(bottom_depth * bottom_nxy.x, bottom_depth * bottom_nxy.y, bottom_depth);
      float3 center_point = make_float3(center_depth * center_nxy.x, center_depth * center_nxy.y, center_depth);
      
      constexpr float kRatioThreshold = 2.f;
      constexpr float kRatioThresholdSquared = kRatioThreshold * kRatioThreshold;
      
      float left_dist_squared = SquaredLength(left_point - center_point);
      float right_dist_squared = SquaredLength(right_point - center_point);
      float left_right_ratio = left_dist_squared / right_dist_squared;
      float3 left_to_right;
      if (left_right_ratio < kRatioThresholdSquared &&
          left_right_ratio > 1.f / kRatioThresholdSquared) {
        left_to_right = right_point - left_point;
      } else if (left_dist_squared < right_dist_squared) {
        left_to_right = center_point - left_point;
      } else {  // left_dist_squared >= right_dist_squared
        left_to_right = right_point - center_point;
      }
      
      float bottom_dist_squared = SquaredLength(bottom_point - center_point);
      float top_dist_squared = SquaredLength(top_point - center_point);
      float bottom_top_ratio = bottom_dist_squared / top_dist_squared;
      float3 bottom_to_top;
      if (bottom_top_ratio < kRatioThresholdSquared &&
          bottom_top_ratio > 1.f / kRatioThresholdSquared) {
        bottom_to_top = top_point - bottom_point;
      } else if (bottom_dist_squared < top_dist_squared) {
        bottom_to_top = center_point - bottom_point;
      } else {  // bottom_dist_squared >= top_dist_squared
        bottom_to_top = top_point - center_point;
      }
      
      float3 normal;
      CrossProduct(left_to_right, bottom_to_top, &normal);
      
      // Apply angle threshold.
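      // The cosine between the estimated normal and the viewing ray to the point is
      // compared against min_cos_angle; estimates failing this check are rejected
      // (debug color "dark yellow" in the list above).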
      const float normal_length = Norm(normal);
      const float point_distance = Norm(center_point);
      const float view_cos_angle = Dot(normal, center_point) / (normal_length * point_distance);
      if (view_cos_angle > min_cos_angle) {
        inv_depth_map_out(y, x) = kInvalidInvDepth;
        if (kDebugFilterReasons) {
          filter_reasons(y, x) = make_uchar3(140, 140, 0);
        }
      } else {
        inv_depth_map_out(y, x) = p.inv_depth_map(y, x);
        if (kDebugFilterReasons) {
          filter_reasons(y, x) = make_uchar3(0, 0, 0);
        }
      }
    }
  } else if (x < p.inv_depth_map.width() && y < p.inv_depth_map.height()) {
    inv_depth_map_out(y, x) = kInvalidInvDepth;
    if (kDebugFilterReasons) {
      filter_reasons(y, x) = make_uchar3(127, 127, 127);
    }
  }
}

void PatchMatchFilterOutliersCUDA(
    const StereoParametersMulti& p,
    float min_inv_depth,
    float required_range_min_depth,
    float required_range_max_depth,
    const CUDAMatrix3x4* reference_tr_stereo,
    CUDABuffer_<float>* inv_depth_map_out,
    float cost_threshold,
    float epipolar_gradient_threshold,
    float min_cos_angle,
    CUDABuffer_<float>* second_best_costs,
    float second_best_min_cost_factor,
    CUDABuffer_<uchar3>* filter_reasons) {
  CHECK_CUDA_NO_ERROR();
  
  bool have_filter_reasons = filter_reasons != nullptr;
  COMPILE_OPTION(have_filter_reasons,
      CUDA_AUTO_TUNE_2D_TEMPLATED(
          PatchMatchFilterOutliersCUDAKernel,
          16, 16,
          p.inv_depth_map.width(), p.inv_depth_map.height(),
          0, p.stream,
          TEMPLATE_ARGUMENTS(_have_filter_reasons),
          /* kernel parameters */
          StereoParametersMultiCUDA(p),
          min_inv_depth,
          required_range_min_depth,
          required_range_max_depth,
          reference_tr_stereo,
          *inv_depth_map_out,
          cost_threshold,
          epipolar_gradient_threshold,
          min_cos_angle,
          *second_best_costs,
          second_best_min_cost_factor,
          filter_reasons ? *filter_reasons : CUDABuffer_<uchar3>()));
  CHECK_CUDA_NO_ERROR();
}

}