hip_filename (5-84 chars) | hip_content (79-9.69M chars) | cuda_filename (4-83 chars) | cuda_content (19-9.69M chars) |
---|---|---|---|
188cf0f08589dd1ea31dd189d0ad3d618b357efc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2019 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
// Reference:
// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu
//
// Reference:
// https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/iou3d/src/iou3d_kernel.cu
// 3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others)
// Written by Shaoshuai Shi
// All Rights Reserved 2019-2020.
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include "open3d/ml/Helper.h"
#include "open3d/ml/impl/misc/Nms.h"
#include "open3d/ml/impl/misc/NmsImpl.h"
#include "open3d/utility/Helper.h"
namespace open3d {
namespace ml {
namespace impl {
template <typename T>
static void SortIndices(T *values,
int64_t *sort_indices,
int64_t n,
bool descending = false) {
// Cast to thrust device pointer.
thrust::device_ptr<T> values_dptr = thrust::device_pointer_cast(values);
thrust::device_ptr<int64_t> sort_indices_dptr =
thrust::device_pointer_cast(sort_indices);
// Fill sort_indices with 0, 1, ..., n-1.
thrust::sequence(sort_indices_dptr, sort_indices_dptr + n, 0);
// Sort values and sort_indices together.
if (descending) {
thrust::stable_sort_by_key(values_dptr, values_dptr + n,
sort_indices_dptr, thrust::greater<T>());
} else {
thrust::stable_sort_by_key(values_dptr, values_dptr + n,
sort_indices_dptr);
}
}
__global__ void NmsKernel(const float *boxes,
const int64_t *sort_indices,
uint64_t *mask,
const int n,
const double nms_overlap_thresh,
const int num_block_cols) {
// Row-wise block index.
const int block_row_idx = blockIdx.y;
// Column-wise block index.
const int block_col_idx = blockIdx.x;
// Local block row size.
const int row_size =
fminf(n - block_row_idx * NMS_BLOCK_SIZE, NMS_BLOCK_SIZE);
// Local block col size.
const int col_size =
fminf(n - block_col_idx * NMS_BLOCK_SIZE, NMS_BLOCK_SIZE);
// Fill local block_boxes by fetching the global box memory.
// block_boxes = boxes[NBS*block_col_idx : NBS*block_col_idx+col_size, :].
//
// TODO: It is also possible to load the comparison target to the shared
// memory as well.
__shared__ float block_boxes[NMS_BLOCK_SIZE * 5];
if (threadIdx.x < col_size) {
float *dst = block_boxes + threadIdx.x * 5;
const int src_idx = NMS_BLOCK_SIZE * block_col_idx + threadIdx.x;
const float *src = boxes + sort_indices[src_idx] * 5;
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
}
__syncthreads();
// Comparing src and dst. In one block, the following src and dst indices
// are compared:
// - src: BS * block_row_idx : BS * block_row_idx + row_size
// - dst: BS * block_col_idx : BS * block_col_idx + col_size
//
// With all blocks, all src and dst indices are compared.
//
// Result:
// mask[i, j] is a 64-bit integer where mask[i, j][k] (k counted from right)
// is 1 iff box[i] overlaps with box[BS*j+k].
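// Worked example (assuming NMS_BLOCK_SIZE == 64): with n == 100 there are two
// block columns, so mask is laid out as [100][2]. If sorted box 3 overlaps
// sorted boxes 10 and 70, then bit 10 of mask[3][0] and bit 6 (= 70 - 64) of
// mask[3][1] are set.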
if (threadIdx.x < row_size) {
// src_idx indexes into global memory.
const int src_idx = NMS_BLOCK_SIZE * block_row_idx + threadIdx.x;
// dst_idx indexes into shared memory.
int dst_idx = block_row_idx == block_col_idx ? threadIdx.x + 1 : 0;
uint64_t t = 0;
while (dst_idx < col_size) {
if (IouBev(boxes + sort_indices[src_idx] * 5,
block_boxes + dst_idx * 5) > nms_overlap_thresh) {
t |= 1ULL << dst_idx;
}
dst_idx++;
}
mask[src_idx * num_block_cols + block_col_idx] = t;
}
}
std::vector<int64_t> NmsCUDAKernel(const float *boxes,
const float *scores,
int n,
double nms_overlap_thresh) {
if (n == 0) {
return {};
}
// Column-wise number of blocks.
const int num_block_cols = utility::DivUp(n, NMS_BLOCK_SIZE);
// Compute sort indices.
float *scores_copy = nullptr;
OPEN3D_ML_CUDA_CHECK(hipMalloc((void **)&scores_copy, n * sizeof(float)));
OPEN3D_ML_CUDA_CHECK(hipMemcpy(scores_copy, scores, n * sizeof(float),
hipMemcpyDeviceToDevice));
int64_t *sort_indices = nullptr;
OPEN3D_ML_CUDA_CHECK(
hipMalloc((void **)&sort_indices, n * sizeof(int64_t)));
SortIndices(scores_copy, sort_indices, n, true);
OPEN3D_ML_CUDA_CHECK(hipFree(scores_copy));
// Allocate masks on device.
uint64_t *mask_ptr = nullptr;
OPEN3D_ML_CUDA_CHECK(hipMalloc((void **)&mask_ptr,
n * num_block_cols * sizeof(uint64_t)));
// Launch kernel.
dim3 blocks(utility::DivUp(n, NMS_BLOCK_SIZE),
utility::DivUp(n, NMS_BLOCK_SIZE));
dim3 threads(NMS_BLOCK_SIZE);
hipLaunchKernelGGL(( NmsKernel), dim3(blocks), dim3(threads), 0, 0, boxes, sort_indices, mask_ptr, n,
nms_overlap_thresh, num_block_cols);
// Copy cuda masks to cpu.
std::vector<uint64_t> mask_vec(n * num_block_cols);
uint64_t *mask = mask_vec.data();
OPEN3D_ML_CUDA_CHECK(hipMemcpy(mask_vec.data(), mask_ptr,
n * num_block_cols * sizeof(uint64_t),
hipMemcpyDeviceToHost));
OPEN3D_ML_CUDA_CHECK(hipFree(mask_ptr));
// Copy sort_indices to cpu.
std::vector<int64_t> sort_indices_cpu(n);
OPEN3D_ML_CUDA_CHECK(hipMemcpy(sort_indices_cpu.data(), sort_indices,
n * sizeof(int64_t),
hipMemcpyDeviceToHost));
// Write to keep_indices in CPU.
// remv_cpu has n bits in total. If the bit is 1, the corresponding
// box will be removed.
// TODO: This part can be implemented in CUDA. We use the original author's
// implementation here.
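// Example: if the highest-scoring box (i = 0) overlaps box i = 5, then after
// processing i = 0 the bit for 5 is already set in remv_cpu, so i = 5 is
// skipped and never added to keep_indices.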
std::vector<uint64_t> remv_cpu(num_block_cols, 0);
std::vector<int64_t> keep_indices;
for (int i = 0; i < n; i++) {
int block_col_idx = i / NMS_BLOCK_SIZE;
int inner_block_col_idx = i % NMS_BLOCK_SIZE; // threadIdx.x
// Querying the i-th bit in remv_cpu, counted from the right.
// - remv_cpu[block_col_idx]: the block bitmap containing the query
// - 1ULL << inner_block_col_idx: the one-hot bitmap to extract i
if (!(remv_cpu[block_col_idx] & (1ULL << inner_block_col_idx))) {
// Keep the i-th box.
keep_indices.push_back(sort_indices_cpu[i]);
// Any box that overlaps with the i-th box will be removed.
uint64_t *p = mask + i * num_block_cols;
for (int j = block_col_idx; j < num_block_cols; j++) {
remv_cpu[j] |= p[j];
}
}
}
OPEN3D_ML_CUDA_CHECK(hipFree(sort_indices));
return keep_indices;
}
} // namespace impl
} // namespace ml
} // namespace open3d
| 188cf0f08589dd1ea31dd189d0ad3d618b357efc.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2019 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
//
// Reference:
// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu
//
// Reference:
// https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/ops/iou3d/src/iou3d_kernel.cu
// 3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others)
// Written by Shaoshuai Shi
// All Rights Reserved 2019-2020.
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include "open3d/ml/Helper.h"
#include "open3d/ml/impl/misc/Nms.h"
#include "open3d/ml/impl/misc/NmsImpl.h"
#include "open3d/utility/Helper.h"
namespace open3d {
namespace ml {
namespace impl {
template <typename T>
static void SortIndices(T *values,
int64_t *sort_indices,
int64_t n,
bool descending = false) {
// Cast to thrust device pointer.
thrust::device_ptr<T> values_dptr = thrust::device_pointer_cast(values);
thrust::device_ptr<int64_t> sort_indices_dptr =
thrust::device_pointer_cast(sort_indices);
// Fill sort_indices with 0, 1, ..., n-1.
thrust::sequence(sort_indices_dptr, sort_indices_dptr + n, 0);
// Sort values and sort_indices together.
if (descending) {
thrust::stable_sort_by_key(values_dptr, values_dptr + n,
sort_indices_dptr, thrust::greater<T>());
} else {
thrust::stable_sort_by_key(values_dptr, values_dptr + n,
sort_indices_dptr);
}
}
__global__ void NmsKernel(const float *boxes,
const int64_t *sort_indices,
uint64_t *mask,
const int n,
const double nms_overlap_thresh,
const int num_block_cols) {
// Row-wise block index.
const int block_row_idx = blockIdx.y;
// Column-wise block index.
const int block_col_idx = blockIdx.x;
// Local block row size.
const int row_size =
fminf(n - block_row_idx * NMS_BLOCK_SIZE, NMS_BLOCK_SIZE);
// Local block col size.
const int col_size =
fminf(n - block_col_idx * NMS_BLOCK_SIZE, NMS_BLOCK_SIZE);
// Fill local block_boxes by fetching the global box memory.
// block_boxes = boxes[NBS*block_col_idx : NBS*block_col_idx+col_size, :].
//
// TODO: It is also possible to load the comparison target to the shared
// memory as well.
__shared__ float block_boxes[NMS_BLOCK_SIZE * 5];
if (threadIdx.x < col_size) {
float *dst = block_boxes + threadIdx.x * 5;
const int src_idx = NMS_BLOCK_SIZE * block_col_idx + threadIdx.x;
const float *src = boxes + sort_indices[src_idx] * 5;
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
}
__syncthreads();
// Comparing src and dst. In one block, the following src and dst indices
// are compared:
// - src: BS * block_row_idx : BS * block_row_idx + row_size
// - dst: BS * block_col_idx : BS * block_col_idx + col_size
//
// With all blocks, all src and dst indices are compared.
//
// Result:
// mask[i, j] is a 64-bit integer where mask[i, j][k] (k counted from right)
// is 1 iff box[i] overlaps with box[BS*j+k].
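// Worked example (assuming NMS_BLOCK_SIZE == 64): with n == 100 there are two
// block columns, so mask is laid out as [100][2]. If sorted box 3 overlaps
// sorted boxes 10 and 70, then bit 10 of mask[3][0] and bit 6 (= 70 - 64) of
// mask[3][1] are set.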
if (threadIdx.x < row_size) {
// src_idx indexes into global memory.
const int src_idx = NMS_BLOCK_SIZE * block_row_idx + threadIdx.x;
// dst_idx indexes into shared memory.
int dst_idx = block_row_idx == block_col_idx ? threadIdx.x + 1 : 0;
uint64_t t = 0;
while (dst_idx < col_size) {
if (IouBev(boxes + sort_indices[src_idx] * 5,
block_boxes + dst_idx * 5) > nms_overlap_thresh) {
t |= 1ULL << dst_idx;
}
dst_idx++;
}
mask[src_idx * num_block_cols + block_col_idx] = t;
}
}
std::vector<int64_t> NmsCUDAKernel(const float *boxes,
const float *scores,
int n,
double nms_overlap_thresh) {
if (n == 0) {
return {};
}
// Column-wise number of blocks.
const int num_block_cols = utility::DivUp(n, NMS_BLOCK_SIZE);
// Compute sort indices.
float *scores_copy = nullptr;
OPEN3D_ML_CUDA_CHECK(cudaMalloc((void **)&scores_copy, n * sizeof(float)));
OPEN3D_ML_CUDA_CHECK(cudaMemcpy(scores_copy, scores, n * sizeof(float),
cudaMemcpyDeviceToDevice));
int64_t *sort_indices = nullptr;
OPEN3D_ML_CUDA_CHECK(
cudaMalloc((void **)&sort_indices, n * sizeof(int64_t)));
SortIndices(scores_copy, sort_indices, n, true);
OPEN3D_ML_CUDA_CHECK(cudaFree(scores_copy));
// Allocate masks on device.
uint64_t *mask_ptr = nullptr;
OPEN3D_ML_CUDA_CHECK(cudaMalloc((void **)&mask_ptr,
n * num_block_cols * sizeof(uint64_t)));
// Launch kernel.
dim3 blocks(utility::DivUp(n, NMS_BLOCK_SIZE),
utility::DivUp(n, NMS_BLOCK_SIZE));
dim3 threads(NMS_BLOCK_SIZE);
NmsKernel<<<blocks, threads>>>(boxes, sort_indices, mask_ptr, n,
nms_overlap_thresh, num_block_cols);
// Copy cuda masks to cpu.
std::vector<uint64_t> mask_vec(n * num_block_cols);
uint64_t *mask = mask_vec.data();
OPEN3D_ML_CUDA_CHECK(cudaMemcpy(mask_vec.data(), mask_ptr,
n * num_block_cols * sizeof(uint64_t),
cudaMemcpyDeviceToHost));
OPEN3D_ML_CUDA_CHECK(cudaFree(mask_ptr));
// Copy sort_indices to cpu.
std::vector<int64_t> sort_indices_cpu(n);
OPEN3D_ML_CUDA_CHECK(cudaMemcpy(sort_indices_cpu.data(), sort_indices,
n * sizeof(int64_t),
cudaMemcpyDeviceToHost));
// Write to keep_indices in CPU.
// remv_cpu has n bits in total. If the bit is 1, the corresponding
// box will be removed.
// TODO: This part can be implemented in CUDA. We use the original author's
// implementation here.
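// Example: if the highest-scoring box (i = 0) overlaps box i = 5, then after
// processing i = 0 the bit for 5 is already set in remv_cpu, so i = 5 is
// skipped and never added to keep_indices.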
std::vector<uint64_t> remv_cpu(num_block_cols, 0);
std::vector<int64_t> keep_indices;
for (int i = 0; i < n; i++) {
int block_col_idx = i / NMS_BLOCK_SIZE;
int inner_block_col_idx = i % NMS_BLOCK_SIZE; // threadIdx.x
// Querying the i-th bit in remv_cpu, counted from the right.
// - remv_cpu[block_col_idx]: the block bitmap containing the query
// - 1ULL << inner_block_col_idx: the one-hot bitmap to extract i
if (!(remv_cpu[block_col_idx] & (1ULL << inner_block_col_idx))) {
// Keep the i-th box.
keep_indices.push_back(sort_indices_cpu[i]);
// Any box that overlaps with the i-th box will be removed.
uint64_t *p = mask + i * num_block_cols;
for (int j = block_col_idx; j < num_block_cols; j++) {
remv_cpu[j] |= p[j];
}
}
}
OPEN3D_ML_CUDA_CHECK(cudaFree(sort_indices));
return keep_indices;
}
} // namespace impl
} // namespace ml
} // namespace open3d
|
24908f394d8ea0dcbc769494a6754c7e4ddac2ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "Utils.h"
hipError_t handleCudaErrors(hipError_t error_code) {
if(error_code != hipSuccess) {
std::cout << "CUDA-related error: " << hipGetErrorString(error_code) << std::endl;
exit(EXIT_FAILURE);
}
return error_code;
}
unsigned getNumberOfStreamingMultiprocessors() {
int deviceId = 0;
handleCudaErrors( hipGetDevice(&deviceId) );
hipDeviceProp_t deviceProp{};
handleCudaErrors( hipGetDeviceProperties(&deviceProp, deviceId) );
return static_cast<unsigned>(deviceProp.multiProcessorCount);
}
void printInformationAboutGPUDevice() {
int deviceId = 0;
handleCudaErrors( hipGetDevice(&deviceId) );
hipDeviceProp_t deviceProp{};
handleCudaErrors( hipGetDeviceProperties(&deviceProp, deviceId) );
std::cout << "[DEVICE] Name: " << deviceProp.name << ", number of SMs: " << deviceProp.multiProcessorCount
<< ", shared memory per SM: " << deviceProp.sharedMemPerMultiprocessor << std::endl;
} | 24908f394d8ea0dcbc769494a6754c7e4ddac2ea.cu | #include "Utils.h"
cudaError_t handleCudaErrors(cudaError_t error_code) {
if(error_code != cudaSuccess) {
std::cout << "CUDA-related error: " << cudaGetErrorString(error_code) << std::endl;
exit(EXIT_FAILURE);
}
return error_code;
}
unsigned getNumberOfStreamingMultiprocessors() {
int deviceId = 0;
handleCudaErrors( cudaGetDevice(&deviceId) );
cudaDeviceProp deviceProp{};
handleCudaErrors( cudaGetDeviceProperties(&deviceProp, deviceId) );
return static_cast<unsigned>(deviceProp.multiProcessorCount);
}
void printInformationAboutGPUDevice() {
int deviceId = 0;
handleCudaErrors( cudaGetDevice(&deviceId) );
cudaDeviceProp deviceProp{};
handleCudaErrors( cudaGetDeviceProperties(&deviceProp, deviceId) );
std::cout << "[DEVICE] Name: " << deviceProp.name << ", number of SMs: " << deviceProp.multiProcessorCount
<< ", shared memory per SM: " << deviceProp.sharedMemPerMultiprocessor << std::endl;
} |
b12983d5366ae7efb55e59b0d73909467459cd3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// GpuSim_CUDA.cu
// by Sean McCallum, 2010
#include "global.h"
#include "DynamicSpikingSynapse_struct.h"
#include "LifNeuron_struct.h"
#include <iostream>
#include <fstream>
using namespace std;
//Forward Declarations
extern "C" void advanceGPU( LifNeuron_struct* neuronArrays,
int neuron_count,
DynamicSpikingSynapse_struct* synapseArray,
int synapse_count,
int endTime,
int currentStep,
int maxSteps,
int width);
// Make pointers to device neuron arrays
FLOAT* neur_C1_d;
FLOAT* neur_C2_d;
FLOAT* neur_I0_d;
FLOAT* neur_Inoise_d;
FLOAT* neur_Trefract_d;
FLOAT* neur_Vm_d;
FLOAT* neur_Vreset_d;
FLOAT* neur_Vthresh_d;
int* neur_nStepsInRefr_d;
int* neur_spikeCount_d;
FLOAT* neur_summationPoint_d;
FLOAT* neur_randNoise_d;
// Determine sizes for GPU device arrays
size_t neur_FLOATS_size;
size_t neur_ints_size;
__device__ void LifNeuronFireDevice(FLOAT* neur_Trefract_d,
FLOAT* neur_Vm_d,
FLOAT* neur_Vreset_d,
int* neur_spikeCount_d,
int* neur_nStepsInRefr_d,
int idx,
double deltaT,
double simulationTime);
__global__ void advanceNeuronsDevice(FLOAT* neur_C1_d,
FLOAT* neur_C2_d,
FLOAT* neur_I0_d,
FLOAT* neur_Inoise_d,
FLOAT* neur_Trefract_d,
FLOAT* neur_Vm_d,
FLOAT* neur_Vreset_d,
FLOAT* neur_Vthresh_d,
int* neur_nStepsInRefr_d,
int* neur_spikeCount_d,
FLOAT* neur_summationPoint_d,
FLOAT* neur_randNoise_d,
int n,
double deltaT,
double simulationTime);
void advanceGPU(LifNeuron_struct* neuronArrays,
int neuron_count,
DynamicSpikingSynapse_struct* synapseArray,
int synapse_count,
int endTime,
int currentStep,
int maxSteps,
int width)
{
if (g_simulationTime == 0.0){
// Get sizes of arrays
neur_FLOATS_size = neuron_count * sizeof (FLOAT);
neur_ints_size = neuron_count * sizeof (int);
// Allocate on GPU device
hipMalloc ( ( void ** ) &neur_C1_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_C2_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_I0_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_Inoise_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_Trefract_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_Vm_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_Vreset_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_Vthresh_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_nStepsInRefr_d, neur_ints_size );
hipMalloc ( ( void ** ) &neur_summationPoint_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_randNoise_d, neur_FLOATS_size );
hipMalloc ( ( void ** ) &neur_spikeCount_d, neur_ints_size );
// Copy host neuron and synapse arrays into GPU device
hipMemcpy ( neur_C1_d, neuronArrays->C1, neur_FLOATS_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_C2_d, neuronArrays->C2, neur_FLOATS_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_I0_d, neuronArrays->I0, neur_FLOATS_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_Inoise_d, neuronArrays->Inoise, neur_FLOATS_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_Trefract_d, neuronArrays->Trefract, neur_FLOATS_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_Vm_d, neuronArrays->Vm, neur_FLOATS_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_Vreset_d, neuronArrays->Vreset, neur_FLOATS_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_Vthresh_d, neuronArrays->Vthresh, neur_FLOATS_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_nStepsInRefr_d, neuronArrays->nStepsInRefr, neur_ints_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_summationPoint_d, neuronArrays->summationPoint, neur_FLOATS_size, hipMemcpyHostToDevice );
hipMemcpy ( neur_spikeCount_d, neuronArrays->spikeCount, neur_ints_size, hipMemcpyHostToDevice );
}
// Configure dimensions for CUDA scheduling per sim size
int blocksx, blocksy, threadsx, threadsy;
if (neuronArrays->numNeurons == 100){
blocksx = 2;
blocksy = 1;
threadsx = 64;
threadsy = 1;
}
if (neuronArrays->numNeurons == 625){
blocksx = 5;
blocksy = 5;
threadsx = 8;
threadsy = 4;
}
if (neuronArrays->numNeurons == 10000){
blocksx = 8;
blocksy = 5;
threadsx = 16;
threadsy = 16;
}
dim3 dimGrid(blocksx, blocksy);
dim3 dimBlock(threadsx, threadsy);
DEBUG(cout << "Looping kernels in advanceGPU() function" << endl;);
while (g_simulationTime < endTime)
{
// Generate random noise
for (int i = 0; i < neuron_count; i++){
neuronArrays->randNoise[i] = normrnd();
}
// Copy random noise to GPU device
hipMemcpy ( neur_randNoise_d, neuronArrays->randNoise, neur_FLOATS_size, hipMemcpyHostToDevice );
// Advance neurons
hipLaunchKernelGGL(( advanceNeuronsDevice) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
neur_C1_d,
neur_C2_d,
neur_I0_d,
neur_Inoise_d,
neur_Trefract_d,
neur_Vm_d,
neur_Vreset_d,
neur_Vthresh_d,
neur_nStepsInRefr_d,
neur_spikeCount_d,
neur_summationPoint_d,
neur_randNoise_d,
neuron_count,
neuronArrays->deltaT,
g_simulationTime );
// Copy processed data from GPU device memory to host
hipMemcpy ( neuronArrays->spikeCount, neur_spikeCount_d, neur_ints_size, hipMemcpyDeviceToHost );
// Go through the spike counts and append g_simulationTime to each spiking neuron's spike history
for (int i = 0; i < neuron_count; i++){
if ( neuronArrays->spikeCount[i] > 0) {
vector<FLOAT>& spikeHist = *(neuronArrays->spikeHistories[i]);
spikeHist.push_back(g_simulationTime);
neuronArrays->spikeCount[i] = 0; // reset for next advance
}
}
// Copy zeroed array back to GPU
hipMemcpy ( neur_spikeCount_d, neuronArrays->spikeCount, neur_ints_size, hipMemcpyHostToDevice );
// Advance the clock
g_simulationTime += neuronArrays->deltaT;
}
// Determine whether to free device mem.
if (currentStep == maxSteps && g_simulationTime >= endTime){
hipFree( neur_C1_d );
hipFree( neur_C2_d );
hipFree( neur_I0_d );
hipFree( neur_Inoise_d );
hipFree( neur_Vm_d );
hipFree( neur_Trefract_d );
hipFree( neur_Vreset_d );
hipFree( neur_Vthresh_d );
hipFree( neur_nStepsInRefr_d );
hipFree( neur_summationPoint_d );
// Also release the noise and spike-count buffers allocated above.
hipFree( neur_randNoise_d );
hipFree( neur_spikeCount_d );
}
}
// CUDA code for firing a neuron ------------------------------------------------------------------------
__device__ void LifNeuronFireDevice ( FLOAT* neur_Trefract_d,
FLOAT* neur_Vm_d,
FLOAT* neur_Vreset_d,
int* neur_spikeCount_d,
int* neur_nStepsInRefr_d,
int idx,
double deltaT,
double simulationTime ) {
// Note the occurrence of a spike
neur_spikeCount_d[idx]++;
// calculate the number of steps in the absolute refractory period
neur_nStepsInRefr_d[idx] = static_cast<int> ( neur_Trefract_d[idx] / deltaT + 0.5 );
// reset to 'Vreset'
neur_Vm_d[idx] = neur_Vreset_d[idx];
}
// CUDA code for advancing neurons-----------------------------------------------------------------------
__global__ void advanceNeuronsDevice(FLOAT* neur_C1_d,
FLOAT* neur_C2_d,
FLOAT* neur_I0_d,
FLOAT* neur_Inoise_d,
FLOAT* neur_Trefract_d,
FLOAT* neur_Vm_d,
FLOAT* neur_Vreset_d,
FLOAT* neur_Vthresh_d,
int* neur_nStepsInRefr_d,
int* neur_spikeCount_d,
FLOAT* neur_summationPoint_d,
FLOAT* neur_randNoise_d,
int n,
double deltaT,
double simulationTime) {
// determine which neuron this thread is processing
//int idx = threadIdx.x;
int idx = ((blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y)) +
(threadIdx.x + threadIdx.y * blockDim.x);
// Guard: whole blocks are launched, so idx can exceed the neuron count.
if (idx >= n) return;
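// Example (100-neuron run, dimGrid(2,1), dimBlock(64,1)): the thread with
// blockIdx.x == 1 and threadIdx.x == 3 gets idx = (1 + 2*0)*64 + 3 = 67.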
// Reset fired status
//neur_hasFired_d[idx] = false;
if ( neur_nStepsInRefr_d[idx] > 0 ) { // is neuron refractory?
--neur_nStepsInRefr_d[idx];
} else if ( neur_Vm_d[idx] >= neur_Vthresh_d[idx]) { // should it fire?
LifNeuronFireDevice ( neur_Trefract_d,
neur_Vm_d,
neur_Vreset_d,
neur_spikeCount_d,
neur_nStepsInRefr_d,
idx,
deltaT,
simulationTime );
} else {
neur_summationPoint_d[idx] += neur_I0_d[idx]; // add IO
neur_summationPoint_d[idx] += (neur_randNoise_d[idx] * neur_Inoise_d[idx]); // add cheap noise
neur_Vm_d[idx] = neur_C1_d[idx] * neur_Vm_d[idx] + neur_C2_d[idx] * neur_summationPoint_d[idx]; // decay Vm and add inputs
}
// clear synaptic input for next time step
neur_summationPoint_d[idx] = 0;
}
| b12983d5366ae7efb55e59b0d73909467459cd3d.cu | // GpuSim_CUDA.cu
// by Sean McCallum, 2010
#include "global.h"
#include "DynamicSpikingSynapse_struct.h"
#include "LifNeuron_struct.h"
#include <iostream>
#include <fstream>
using namespace std;
//Forward Declarations
extern "C" void advanceGPU( LifNeuron_struct* neuronArrays,
int neuron_count,
DynamicSpikingSynapse_struct* synapseArray,
int synapse_count,
int endTime,
int currentStep,
int maxSteps,
int width);
// Make pointers to device neuron arrays
FLOAT* neur_C1_d;
FLOAT* neur_C2_d;
FLOAT* neur_I0_d;
FLOAT* neur_Inoise_d;
FLOAT* neur_Trefract_d;
FLOAT* neur_Vm_d;
FLOAT* neur_Vreset_d;
FLOAT* neur_Vthresh_d;
int* neur_nStepsInRefr_d;
int* neur_spikeCount_d;
FLOAT* neur_summationPoint_d;
FLOAT* neur_randNoise_d;
// Determine sizes for GPU device arrays
size_t neur_FLOATS_size;
size_t neur_ints_size;
__device__ void LifNeuronFireDevice(FLOAT* neur_Trefract_d,
FLOAT* neur_Vm_d,
FLOAT* neur_Vreset_d,
int* neur_spikeCount_d,
int* neur_nStepsInRefr_d,
int idx,
double deltaT,
double simulationTime);
__global__ void advanceNeuronsDevice(FLOAT* neur_C1_d,
FLOAT* neur_C2_d,
FLOAT* neur_I0_d,
FLOAT* neur_Inoise_d,
FLOAT* neur_Trefract_d,
FLOAT* neur_Vm_d,
FLOAT* neur_Vreset_d,
FLOAT* neur_Vthresh_d,
int* neur_nStepsInRefr_d,
int* neur_spikeCount_d,
FLOAT* neur_summationPoint_d,
FLOAT* neur_randNoise_d,
int n,
double deltaT,
double simulationTime);
void advanceGPU(LifNeuron_struct* neuronArrays,
int neuron_count,
DynamicSpikingSynapse_struct* synapseArray,
int synapse_count,
int endTime,
int currentStep,
int maxSteps,
int width)
{
if (g_simulationTime == 0.0){
// Get sizes of arrays
neur_FLOATS_size = neuron_count * sizeof (FLOAT);
neur_ints_size = neuron_count * sizeof (int);
// Allocate on GPU device
cudaMalloc ( ( void ** ) &neur_C1_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_C2_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_I0_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_Inoise_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_Trefract_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_Vm_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_Vreset_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_Vthresh_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_nStepsInRefr_d, neur_ints_size );
cudaMalloc ( ( void ** ) &neur_summationPoint_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_randNoise_d, neur_FLOATS_size );
cudaMalloc ( ( void ** ) &neur_spikeCount_d, neur_ints_size );
// Copy host neuron and synapse arrays into GPU device
cudaMemcpy ( neur_C1_d, neuronArrays->C1, neur_FLOATS_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_C2_d, neuronArrays->C2, neur_FLOATS_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_I0_d, neuronArrays->I0, neur_FLOATS_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_Inoise_d, neuronArrays->Inoise, neur_FLOATS_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_Trefract_d, neuronArrays->Trefract, neur_FLOATS_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_Vm_d, neuronArrays->Vm, neur_FLOATS_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_Vreset_d, neuronArrays->Vreset, neur_FLOATS_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_Vthresh_d, neuronArrays->Vthresh, neur_FLOATS_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_nStepsInRefr_d, neuronArrays->nStepsInRefr, neur_ints_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_summationPoint_d, neuronArrays->summationPoint, neur_FLOATS_size, cudaMemcpyHostToDevice );
cudaMemcpy ( neur_spikeCount_d, neuronArrays->spikeCount, neur_ints_size, cudaMemcpyHostToDevice );
}
// Configure dimensions for CUDA scheduling per sim size
int blocksx, blocksy, threadsx, threadsy;
if (neuronArrays->numNeurons == 100){
blocksx = 2;
blocksy = 1;
threadsx = 64;
threadsy = 1;
}
if (neuronArrays->numNeurons == 625){
blocksx = 5;
blocksy = 5;
threadsx = 8;
threadsy = 4;
}
if (neuronArrays->numNeurons == 10000){
blocksx = 8;
blocksy = 5;
threadsx = 16;
threadsy = 16;
}
dim3 dimGrid(blocksx, blocksy);
dim3 dimBlock(threadsx, threadsy);
DEBUG(cout << "Looping kernels in advanceGPU() function" << endl;);
while (g_simulationTime < endTime)
{
// Generate random noise
for (int i = 0; i < neuron_count; i++){
neuronArrays->randNoise[i] = normrnd();
}
// Copy random noise to GPU device
cudaMemcpy ( neur_randNoise_d, neuronArrays->randNoise, neur_FLOATS_size, cudaMemcpyHostToDevice );
// Advance neurons
advanceNeuronsDevice <<< dimGrid, dimBlock >>> (
neur_C1_d,
neur_C2_d,
neur_I0_d,
neur_Inoise_d,
neur_Trefract_d,
neur_Vm_d,
neur_Vreset_d,
neur_Vthresh_d,
neur_nStepsInRefr_d,
neur_spikeCount_d,
neur_summationPoint_d,
neur_randNoise_d,
neuron_count,
neuronArrays->deltaT,
g_simulationTime );
// Copy processed data from GPU device memory to host
cudaMemcpy ( neuronArrays->spikeCount, neur_spikeCount_d, neur_ints_size, cudaMemcpyDeviceToHost );
// Go through the spike counts and append g_simulationTime to each spiking neuron's spike history
for (int i = 0; i < neuron_count; i++){
if ( neuronArrays->spikeCount[i] > 0) {
vector<FLOAT>& spikeHist = *(neuronArrays->spikeHistories[i]);
spikeHist.push_back(g_simulationTime);
neuronArrays->spikeCount[i] = 0; // reset for next advance
}
}
// Copy zeroed array back to GPU
cudaMemcpy ( neur_spikeCount_d, neuronArrays->spikeCount, neur_ints_size, cudaMemcpyHostToDevice );
// Advance the clock
g_simulationTime += neuronArrays->deltaT;
}
// Determine whether to free device mem.
if (currentStep == maxSteps && g_simulationTime >= endTime){
cudaFree( neur_C1_d );
cudaFree( neur_C2_d );
cudaFree( neur_I0_d );
cudaFree( neur_Inoise_d );
cudaFree( neur_Vm_d );
cudaFree( neur_Trefract_d );
cudaFree( neur_Vreset_d );
cudaFree( neur_Vthresh_d );
cudaFree( neur_nStepsInRefr_d );
cudaFree( neur_summationPoint_d );
// Also release the noise and spike-count buffers allocated above.
cudaFree( neur_randNoise_d );
cudaFree( neur_spikeCount_d );
}
}
// CUDA code for firing a neuron ------------------------------------------------------------------------
__device__ void LifNeuronFireDevice ( FLOAT* neur_Trefract_d,
FLOAT* neur_Vm_d,
FLOAT* neur_Vreset_d,
int* neur_spikeCount_d,
int* neur_nStepsInRefr_d,
int idx,
double deltaT,
double simulationTime ) {
// Note the occurrence of a spike
neur_spikeCount_d[idx]++;
// calculate the number of steps in the absolute refractory period
neur_nStepsInRefr_d[idx] = static_cast<int> ( neur_Trefract_d[idx] / deltaT + 0.5 );
// reset to 'Vreset'
neur_Vm_d[idx] = neur_Vreset_d[idx];
}
// CUDA code for advancing neurons-----------------------------------------------------------------------
__global__ void advanceNeuronsDevice(FLOAT* neur_C1_d,
FLOAT* neur_C2_d,
FLOAT* neur_I0_d,
FLOAT* neur_Inoise_d,
FLOAT* neur_Trefract_d,
FLOAT* neur_Vm_d,
FLOAT* neur_Vreset_d,
FLOAT* neur_Vthresh_d,
int* neur_nStepsInRefr_d,
int* neur_spikeCount_d,
FLOAT* neur_summationPoint_d,
FLOAT* neur_randNoise_d,
int n,
double deltaT,
double simulationTime) {
// determine which neuron this thread is processing
//int idx = threadIdx.x;
int idx = ((blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y)) +
(threadIdx.x + threadIdx.y * blockDim.x);
// Guard: whole blocks are launched, so idx can exceed the neuron count.
if (idx >= n) return;
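// Example (100-neuron run, dimGrid(2,1), dimBlock(64,1)): the thread with
// blockIdx.x == 1 and threadIdx.x == 3 gets idx = (1 + 2*0)*64 + 3 = 67.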
// Reset fired status
//neur_hasFired_d[idx] = false;
if ( neur_nStepsInRefr_d[idx] > 0 ) { // is neuron refractory?
--neur_nStepsInRefr_d[idx];
} else if ( neur_Vm_d[idx] >= neur_Vthresh_d[idx]) { // should it fire?
LifNeuronFireDevice ( neur_Trefract_d,
neur_Vm_d,
neur_Vreset_d,
neur_spikeCount_d,
neur_nStepsInRefr_d,
idx,
deltaT,
simulationTime );
} else {
neur_summationPoint_d[idx] += neur_I0_d[idx]; // add IO
neur_summationPoint_d[idx] += (neur_randNoise_d[idx] * neur_Inoise_d[idx]); // add cheap noise
neur_Vm_d[idx] = neur_C1_d[idx] * neur_Vm_d[idx] + neur_C2_d[idx] * neur_summationPoint_d[idx]; // decay Vm and add inputs
}
// clear synaptic input for next time step
neur_summationPoint_d[idx] = 0;
}
|
473a0d5245c5db027cb89eab49732ceb409d4977.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
============================================================================
Name : GScuda.cu
Author : caleb
Version :
Copyright : Your copyright notice
Description : Gale-Shapley stable matching on the GPU (incomplete)
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err);
/**
* CUDA kernel meant to run the Gale-Shapley proposal step, one male per thread
*/
__global__ void GSKernel(int n, int* male_prefs, int* fast_female, int* output) {
//find a female to propose to
// TODO: the proposal loop (walking male_prefs and writing the matching to
// output) has not been implemented yet.
}
__global__ void FFKernel (int n, int* female_prefs, int* fast_female) {
int second_index = female_prefs[blockIdx.x*n+threadIdx.x];
fast_female[blockIdx.x*n+second_index] = threadIdx.x;
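// Example: if female 0's preference list is {2, 0, 1} (most preferred first),
// the three threads of block 0 write fast_female[0*n+2] = 0, fast_female[0*n+0] = 1
// and fast_female[0*n+1] = 2, giving an O(1) rank lookup per (female, male) pair.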
}
/**
* Host function that copies the data and launches GS on the GPU
*
*/
void GS(int n, int** male_prefs, int** female_prefs, int* output)
{
int *d_male_prefs, *d_female_prefs,*d_fast_female;
char *d_is_engaged;
int *d_next_female;
int* d_output;
size_t prefs_size = sizeof(int)*n*n;
CUDA_CHECK_RETURN(hipMalloc((void **)&d_male_prefs, prefs_size));
CUDA_CHECK_RETURN(hipMalloc((void **)&d_female_prefs, prefs_size));
CUDA_CHECK_RETURN(hipMalloc((void **)&d_fast_female, prefs_size));
CUDA_CHECK_RETURN(hipMalloc((void **)&d_output, sizeof(int)*n));
CUDA_CHECK_RETURN(hipMalloc((void **)&d_is_engaged, sizeof(char)*n));
CUDA_CHECK_RETURN(hipMalloc((void **)&d_next_female, sizeof(int)*n));
CUDA_CHECK_RETURN(hipMemcpy(d_male_prefs, male_prefs, prefs_size, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(d_female_prefs, female_prefs, prefs_size, hipMemcpyHostToDevice));
// Build fast_female: fast_female[f*n + m] holds the rank of male m in female f's preference list.
hipLaunchKernelGGL(( FFKernel), dim3(n),dim3(n), 0, 0, n,d_female_prefs,d_fast_female);
//static const int BLOCK_SIZE = 256;
//const int blockCount = (size+BLOCK_SIZE-1)/BLOCK_SIZE;
// Run the (stub) proposal kernel on the precomputed fast_female table.
hipLaunchKernelGGL(( GSKernel), dim3(1),dim3(n), 0, 0, n, d_male_prefs,d_fast_female, d_output);
CUDA_CHECK_RETURN(hipMemcpy(output, d_output, sizeof(int)*n, hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipFree(d_male_prefs));
CUDA_CHECK_RETURN(hipFree(d_female_prefs));
CUDA_CHECK_RETURN(hipFree(d_fast_female));
CUDA_CHECK_RETURN(hipFree(d_output));
}
/*
* testing framework
* returns: 0 if correct_output is equal to the output and 1 otherwise
*/
char test_frame(int n, int** male_prefs, int** female_prefs, int* correct_output) {
int* test_output = (int*) malloc(sizeof(int)*n);
GS(n,male_prefs,female_prefs, test_output);
char to_return = 0;
for (int i = 0; i < n; i++) {
if (test_output[i] != correct_output[i]) {
to_return = 1;
break;
}
}
free(test_output);
return to_return;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err)
{
if (err == hipSuccess)
return;
std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
| 473a0d5245c5db027cb89eab49732ceb409d4977.cu | /*
============================================================================
Name : GScuda.cu
Author : caleb
Version :
Copyright : Your copyright notice
Description : Gale-Shapley stable matching on the GPU (incomplete)
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err);
/**
* CUDA kernel meant to run the Gale-Shapley proposal step, one male per thread
*/
__global__ void GSKernel(int n, int* male_prefs, int* fast_female, int* output) {
//find a female to propose to
// TODO: the proposal loop (walking male_prefs and writing the matching to
// output) has not been implemented yet.
}
__global__ void FFKernel (int n, int* female_prefs, int* fast_female) {
int second_index = female_prefs[blockIdx.x*n+threadIdx.x];
fast_female[blockIdx.x*n+second_index] = threadIdx.x;
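// Example: if female 0's preference list is {2, 0, 1} (most preferred first),
// the three threads of block 0 write fast_female[0*n+2] = 0, fast_female[0*n+0] = 1
// and fast_female[0*n+1] = 2, giving an O(1) rank lookup per (female, male) pair.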
}
/**
* Host function that copies the data and launches GS on the GPU
*
*/
void GS(int n, int** male_prefs, int** female_prefs, int* output)
{
int *d_male_prefs, *d_female_prefs,*d_fast_female;
char *d_is_engaged;
int *d_next_female;
int* d_output;
size_t prefs_size = sizeof(int)*n*n;
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_male_prefs, prefs_size));
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_female_prefs, prefs_size));
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_fast_female, prefs_size));
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_output, sizeof(int)*n));
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_is_engaged, sizeof(char)*n));
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_next_female, sizeof(int)*n));
CUDA_CHECK_RETURN(cudaMemcpy(d_male_prefs, male_prefs, prefs_size, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_female_prefs, female_prefs, prefs_size, cudaMemcpyHostToDevice));
// Build fast_female: fast_female[f*n + m] holds the rank of male m in female f's preference list.
FFKernel<<<n,n>>> (n,d_female_prefs,d_fast_female);
//static const int BLOCK_SIZE = 256;
//const int blockCount = (size+BLOCK_SIZE-1)/BLOCK_SIZE;
// Run the (stub) proposal kernel on the precomputed fast_female table.
GSKernel<<<1,n>>> (n, d_male_prefs,d_fast_female, d_output);
CUDA_CHECK_RETURN(cudaMemcpy(output, d_output, sizeof(int)*n, cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaFree(d_male_prefs));
CUDA_CHECK_RETURN(cudaFree(d_female_prefs));
CUDA_CHECK_RETURN(cudaFree(d_fast_female));
CUDA_CHECK_RETURN(cudaFree(d_output));
}
/*
* testing framework
* returns: 0 if correct_output is equal to the output and 1 otherwise
*/
char test_frame(int n, int** male_prefs, int** female_prefs, int* correct_output) {
int* test_output = (int*) malloc(sizeof(int)*n);
GS(n,male_prefs,female_prefs, test_output);
char to_return = 0;
for (int i = 0; i < n; i++) {
if (test_output[i] != correct_output[i]) {
to_return = 1;
break;
}
}
free(test_output);
return to_return;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
|
051c9bd7f0aea77d1e555ddf4089f762ca2008a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "qcd.h"
//#define REDUCE_PRECISION float
#define REDUCE_PRECISION double
#define REDUCE_THREADS 128
#define REDUCE_MAX_BLOCKS 128
void zeroCuda(float* dst, int len) {
// cuda's floating point format, IEEE-754, represents the floating point
// zero as 4 zero bytes
hipMemset(dst, 0, len*sizeof(float));
}
void copyCuda(float* dst, float *src, int len) {
hipMemcpy(dst, src, len*sizeof(float), hipMemcpyDeviceToDevice);
}
__global__ void axpbyKernel(float a, float *x, float b, float *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
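// Grid-stride loop: each thread handles elements i, i + gridSize, i + 2*gridSize, ...
// so a fixed launch size covers any vector length.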
while (i < len) {
y[i] = a*x[i] + b*y[i];
i += gridSize;
}
}
// performs the operation y[i] = a*x[i] + b*y[i]
void axpbyCuda(float a, float *x, float b, float *y, int len) {
int blocks = min(REDUCE_MAX_BLOCKS, max(len/REDUCE_THREADS, 1));
dim3 dimBlock(REDUCE_THREADS, 1, 1);
dim3 dimGrid(blocks, 1, 1);
hipLaunchKernelGGL(( axpbyKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, a, x, b, y, len);
}
// performs the operation y[i] = a*x[i] + y[i]
void axpyCuda(float a, float *x, float *y, int len) {
axpbyCuda(a, x, 1.0, y, len);
}
// performs the operation y[i] = x[i] + a*y[i]
void xpayCuda(float *x, float a, float *y, int len) {
axpbyCuda(1.0, x, a, y, len);
}
// performs the operation y[i] -= x[i] (minus x plus y)
void mxpyCuda(float *x, float *y, int len) {
axpbyCuda(-1.0, x, 1.0, y, len);
}
__global__ void axpyZpbxKernel(float a, float *x, float *y, float *z, float b, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
float x_i = x[i];
y[i] = a*x_i + y[i];
x[i] = z[i] + b*x_i;
i += gridSize;
}
}
// performs the operations: {y[i] = a x[i] + y[i]; x[i] = z[i] + b x[i]}
void axpyZpbxCuda(float a, float *x, float *y, float *z, float b, int len) {
int blocks = min(REDUCE_MAX_BLOCKS, max(len/REDUCE_THREADS, 1));
dim3 dimBlock(REDUCE_THREADS, 1, 1);
dim3 dimGrid(blocks, 1, 1);
hipLaunchKernelGGL(( axpyZpbxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, a, x, y, z, b, len);
}
// performs the operation y[i] = a*x[i] + y[i], and returns norm(y)
// float axpyNormCuda(float a, float *x, float *y, int len);
// Computes c = a + b in "double single" precision.
__device__ void dsadd(float &c0, float &c1, const float a0, const float a1, const float b0, const float b1) {
// Compute dsa + dsb using Knuth's trick.
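// Here each operand is a "double single" number: (a0, a1) represents the
// unevaluated sum a0 + a1, with a0 carrying the high-order bits and a1 the
// rounding error, giving roughly twice the precision of a plain float.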
float t1 = a0 + b0;
float e = t1 - a0;
float t2 = ((b0 - e) + (a0 - (t1 - e))) + a1 + b1;
// The result is t1 + t2, after normalization.
c0 = e = t1 + t2;
c1 = t2 - (e - t1);
}
//
// float sumCuda(float *a, int n) {}
//
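// The macros below parameterize the generic reduction in reduce_core.cu:
// REDUCE_OPERATION(i) is the per-element term being summed, and
// REDUCE_AUXILIARY(i) is an optional per-element statement executed first
// (e.g. the axpy update used by axpyNormCuda), so each #include emits one
// specialized reduction function named by REDUCE_FUNC_NAME.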
#define REDUCE_FUNC_NAME(suffix) sum##suffix
#define REDUCE_TYPES float *a
#define REDUCE_PARAMS a
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) a[i]
#include "reduce_core.cu"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// float normCuda(float *a, int n) {}
//
#define REDUCE_FUNC_NAME(suffix) norm##suffix
#define REDUCE_TYPES float *a
#define REDUCE_PARAMS a
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i]*a[i])
#include "reduce_core.cu"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// float reDotProductCuda(float *a, float *b, int n) {}
//
#define REDUCE_FUNC_NAME(suffix) reDotProduct##suffix
#define REDUCE_TYPES float *a, float *b
#define REDUCE_PARAMS a, b
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i]*b[i])
#include "reduce_core.cu"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// float axpyNormCuda(float a, float *x, float *y, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second returns the norm of y
//
#define REDUCE_FUNC_NAME(suffix) axpyNorm##suffix
#define REDUCE_TYPES float a, float *x, float *y
#define REDUCE_PARAMS a, x, y
#define REDUCE_AUXILIARY(i) y[i] = a*x[i] + y[i]
#define REDUCE_OPERATION(i) (y[i]*y[i])
#include "reduce_core.cu"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double cpuDouble(float *data, int size) {
double sum = 0;
for (int i = 0; i < size; i++)
sum += data[i];
return sum;
}
void blasTest() {
int n = 1<<26;
double mib = (double)n*sizeof(float) / (1 << 20);
float *h_data = (float *)malloc(n*sizeof(float));
float *d_data;
hipMalloc((void **)&d_data, n*sizeof(float));
for (int i = 0; i < n; i++) {
h_data[i] = rand()/(float)RAND_MAX - 0.5; // n-1.0-i;
}
hipMemcpy(d_data, h_data, n*sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
stopwatchStart();
int LOOPS = 20;
for (int i = 0; i < LOOPS; i++) {
sumCuda(d_data, n);
}
hipDeviceSynchronize();
float secs = stopwatchReadSeconds();
printf("%f GiB/s\n", (mib/1024) * LOOPS / secs);
printf("Device: %f MiB\n", mib);
printf("Shared: %f KiB\n", (float)REDUCE_THREADS*sizeof(float) / (1 << 10));
float correctDouble = cpuDouble(h_data, n);
printf("CPU: %f\n", correctDouble);
printf("CUDA: %f\n", sumCuda(d_data, n));
printf("Error: %f\n", fabs(correctDouble-sumCuda(d_data, n)));
hipFree(d_data) ;
free(h_data);
}
void axpbyTest() {
int n = 3 * 1 << 20;
float *h_x = (float *)malloc(n*sizeof(float));
float *h_y = (float *)malloc(n*sizeof(float));
float *h_res = (float *)malloc(n*sizeof(float));
float *d_x, *d_y;
hipMalloc((void **)&d_x, n*sizeof(float));
hipMalloc((void **)&d_y, n*sizeof(float));
for (int i = 0; i < n; i++) {
h_x[i] = 1;
h_y[i] = 2;
}
hipMemcpy(d_x, h_x, n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, h_y, n*sizeof(float), hipMemcpyHostToDevice);
axpbyCuda(4, d_x, 3, d_y, n/2);
hipMemcpy( h_res, d_y, n*sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++) {
float expect = (i < n/2) ? 4*h_x[i] + 3*h_y[i] : h_y[i];
if (h_res[i] != expect)
printf("FAILED %d : %f != %f\n", i, h_res[i], h_y[i]);
}
hipFree(d_y);
hipFree(d_x);
free(h_x);
free(h_y);
}
| 051c9bd7f0aea77d1e555ddf4089f762ca2008a5.cu | #include <stdlib.h>
#include <stdio.h>
#include "qcd.h"
//#define REDUCE_PRECISION float
#define REDUCE_PRECISION double
#define REDUCE_THREADS 128
#define REDUCE_MAX_BLOCKS 128
void zeroCuda(float* dst, int len) {
// cuda's floating point format, IEEE-754, represents the floating point
// zero as 4 zero bytes
cudaMemset(dst, 0, len*sizeof(float));
}
void copyCuda(float* dst, float *src, int len) {
cudaMemcpy(dst, src, len*sizeof(float), cudaMemcpyDeviceToDevice);
}
__global__ void axpbyKernel(float a, float *x, float b, float *y, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
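// Grid-stride loop: each thread handles elements i, i + gridSize, i + 2*gridSize, ...
// so a fixed launch size covers any vector length.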
while (i < len) {
y[i] = a*x[i] + b*y[i];
i += gridSize;
}
}
// performs the operation y[i] = a*x[i] + b*y[i]
void axpbyCuda(float a, float *x, float b, float *y, int len) {
int blocks = min(REDUCE_MAX_BLOCKS, max(len/REDUCE_THREADS, 1));
dim3 dimBlock(REDUCE_THREADS, 1, 1);
dim3 dimGrid(blocks, 1, 1);
axpbyKernel<<<dimGrid, dimBlock>>>(a, x, b, y, len);
}
// performs the operation y[i] = a*x[i] + y[i]
void axpyCuda(float a, float *x, float *y, int len) {
axpbyCuda(a, x, 1.0, y, len);
}
// performs the operation y[i] = x[i] + a*y[i]
void xpayCuda(float *x, float a, float *y, int len) {
axpbyCuda(1.0, x, a, y, len);
}
// performs the operation y[i] -= x[i] (minus x plus y)
void mxpyCuda(float *x, float *y, int len) {
axpbyCuda(-1.0, x, 1.0, y, len);
}
__global__ void axpyZpbxKernel(float a, float *x, float *y, float *z, float b, int len) {
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
unsigned int gridSize = gridDim.x*blockDim.x;
while (i < len) {
float x_i = x[i];
y[i] = a*x_i + y[i];
x[i] = z[i] + b*x_i;
i += gridSize;
}
}
// performs the operations: {y[i] = a x[i] + y[i]; x[i] = z[i] + b x[i]}
void axpyZpbxCuda(float a, float *x, float *y, float *z, float b, int len) {
int blocks = min(REDUCE_MAX_BLOCKS, max(len/REDUCE_THREADS, 1));
dim3 dimBlock(REDUCE_THREADS, 1, 1);
dim3 dimGrid(blocks, 1, 1);
axpyZpbxKernel<<<dimGrid, dimBlock>>>(a, x, y, z, b, len);
}
// performs the operation y[i] = a*x[i] + y[i], and returns norm(y)
// float axpyNormCuda(float a, float *x, float *y, int len);
// Computes c = a + b in "double single" precision.
__device__ void dsadd(float &c0, float &c1, const float a0, const float a1, const float b0, const float b1) {
// Compute dsa + dsb using Knuth's trick.
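// Here each operand is a "double single" number: (a0, a1) represents the
// unevaluated sum a0 + a1, with a0 carrying the high-order bits and a1 the
// rounding error, giving roughly twice the precision of a plain float.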
float t1 = a0 + b0;
float e = t1 - a0;
float t2 = ((b0 - e) + (a0 - (t1 - e))) + a1 + b1;
// The result is t1 + t2, after normalization.
c0 = e = t1 + t2;
c1 = t2 - (e - t1);
}
//
// float sumCuda(float *a, int n) {}
//
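// The macros below parameterize the generic reduction in reduce_core.cu:
// REDUCE_OPERATION(i) is the per-element term being summed, and
// REDUCE_AUXILIARY(i) is an optional per-element statement executed first
// (e.g. the axpy update used by axpyNormCuda), so each #include emits one
// specialized reduction function named by REDUCE_FUNC_NAME.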
#define REDUCE_FUNC_NAME(suffix) sum##suffix
#define REDUCE_TYPES float *a
#define REDUCE_PARAMS a
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) a[i]
#include "reduce_core.cu"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// float normCuda(float *a, int n) {}
//
#define REDUCE_FUNC_NAME(suffix) norm##suffix
#define REDUCE_TYPES float *a
#define REDUCE_PARAMS a
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i]*a[i])
#include "reduce_core.cu"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// float reDotProductCuda(float *a, float *b, int n) {}
//
#define REDUCE_FUNC_NAME(suffix) reDotProduct##suffix
#define REDUCE_TYPES float *a, float *b
#define REDUCE_PARAMS a, b
#define REDUCE_AUXILIARY(i)
#define REDUCE_OPERATION(i) (a[i]*b[i])
#include "reduce_core.cu"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
//
// float axpyNormCuda(float a, float *x, float *y, n){}
//
// First performs the operation y[i] = a*x[i] + y[i]
// Second returns the norm of y
//
#define REDUCE_FUNC_NAME(suffix) axpyNorm##suffix
#define REDUCE_TYPES float a, float *x, float *y
#define REDUCE_PARAMS a, x, y
#define REDUCE_AUXILIARY(i) y[i] = a*x[i] + y[i]
#define REDUCE_OPERATION(i) (y[i]*y[i])
#include "reduce_core.cu"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION
double cpuDouble(float *data, int size) {
double sum = 0;
for (int i = 0; i < size; i++)
sum += data[i];
return sum;
}
void blasTest() {
int n = 1<<26;
double mib = (double)n*sizeof(float) / (1 << 20);
float *h_data = (float *)malloc(n*sizeof(float));
float *d_data;
cudaMalloc((void **)&d_data, n*sizeof(float));
for (int i = 0; i < n; i++) {
h_data[i] = rand()/(float)RAND_MAX - 0.5; // n-1.0-i;
}
cudaMemcpy(d_data, h_data, n*sizeof(float), cudaMemcpyHostToDevice);
cudaThreadSynchronize();
stopwatchStart();
int LOOPS = 20;
for (int i = 0; i < LOOPS; i++) {
sumCuda(d_data, n);
}
cudaThreadSynchronize();
float secs = stopwatchReadSeconds();
printf("%f GiB/s\n", (mib/1024) * LOOPS / secs);
printf("Device: %f MiB\n", mib);
printf("Shared: %f KiB\n", (float)REDUCE_THREADS*sizeof(float) / (1 << 10));
float correctDouble = cpuDouble(h_data, n);
printf("CPU: %f\n", correctDouble);
printf("CUDA: %f\n", sumCuda(d_data, n));
printf("Error: %f\n", fabs(correctDouble-sumCuda(d_data, n)));
cudaFree(d_data) ;
free(h_data);
}
void axpbyTest() {
int n = 3 * 1 << 20;
float *h_x = (float *)malloc(n*sizeof(float));
float *h_y = (float *)malloc(n*sizeof(float));
float *h_res = (float *)malloc(n*sizeof(float));
float *d_x, *d_y;
cudaMalloc((void **)&d_x, n*sizeof(float));
cudaMalloc((void **)&d_y, n*sizeof(float));
for (int i = 0; i < n; i++) {
h_x[i] = 1;
h_y[i] = 2;
}
cudaMemcpy(d_x, h_x, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, n*sizeof(float), cudaMemcpyHostToDevice);
axpbyCuda(4, d_x, 3, d_y, n/2);
cudaMemcpy( h_res, d_y, n*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++) {
float expect = (i < n/2) ? 4*h_x[i] + 3*h_y[i] : h_y[i];
if (h_res[i] != expect)
printf("FAILED %d : %f != %f\n", i, h_res[i], h_y[i]);
}
cudaFree(d_y);
cudaFree(d_x);
free(h_x);
free(h_y);
}
|
71401e1ba1eec2dd5c64f2c169bac1a104b44656.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2020 by Contributors
* \file rank_metric.cc
* \brief prediction rank based metrics.
* \author Kailong Chen, Tianqi Chen
*/
#include <cmath>
#include <vector>
#include <rabit/rabit.h>
#include <dmlc/registry.h>
#include <xgboost/metric.h>
#include <xgboost/host_device_vector.h>
#include <thrust/iterator/discard_iterator.h>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(rank_metric_gpu);
/*! \brief Evaluate rank list on GPU */
template <typename EvalMetricT>
struct EvalRankGpu : public Metric, public EvalRankConfig {
public:
bst_float Eval(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
bool distributed) override {
// Sanity check is done by the caller
std::vector<unsigned> tgptr(2, 0);
tgptr[1] = static_cast<unsigned>(preds.Size());
const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_;
const auto ngroups = static_cast<bst_omp_uint>(gptr.size() - 1);
auto device = tparam_->gpu_id;
dh::safe_cuda(hipSetDevice(device));
info.labels_.SetDevice(device);
preds.SetDevice(device);
auto dpreds = preds.ConstDevicePointer();
auto dlabels = info.labels_.ConstDevicePointer();
// Sort all the predictions
dh::SegmentSorter<float> segment_pred_sorter;
segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr);
// Compute individual group metric and sum them up
return EvalMetricT::EvalMetric(segment_pred_sorter, dlabels, *this);
}
const char* Name() const override {
return name.c_str();
}
explicit EvalRankGpu(const char* name, const char* param) {
using namespace std; // NOLINT(*)
if (param != nullptr) {
std::ostringstream os;
if (sscanf(param, "%u[-]?", &this->topn) == 1) {
os << name << '@' << param;
this->name = os.str();
} else {
os << name << param;
this->name = os.str();
}
if (param[strlen(param) - 1] == '-') {
this->minus = true;
}
} else {
this->name = name;
}
}
};
/*! \brief Precision at N, for both classification and rank */
struct EvalPrecisionGpu {
public:
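// For each group this counts how many of the top-n ranked predictions carry a
// non-zero label; the counts are summed over all groups and divided by n below.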
static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter,
const float *dlabels,
const EvalRankConfig &ecfg) {
// Group info on device
const auto &dgroups = pred_sorter.GetGroupsSpan();
const auto ngroups = pred_sorter.GetNumGroups();
const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan();
// Original positions of the predictions after they have been sorted
const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan();
// First, determine non zero labels in the dataset individually
auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) {
return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0;
}; // NOLINT
// Find each group's metric sum
dh::caching_device_vector<uint32_t> hits(ngroups, 0);
const auto nitems = pred_sorter.GetNumItems();
auto *dhits = hits.data().get();
int device_id = -1;
dh::safe_cuda(hipGetDevice(&device_id));
// For each group item compute the aggregated precision
dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) {
const auto group_idx = dgroup_idx[idx];
const auto group_begin = dgroups[group_idx];
const auto ridx = idx - group_begin;
if (ridx < ecfg.topn && DetermineNonTrivialLabelLambda(idx)) {
atomicAdd(&dhits[group_idx], 1);
}
});
// Allocator to be used for managing space overhead while performing reductions
dh::XGBCachingDeviceAllocator<char> alloc;
return static_cast<double>(thrust::reduce(thrust::hip::par(alloc),
hits.begin(), hits.end())) / ecfg.topn;
}
};
/*! \brief NDCG: Normalized Discounted Cumulative Gain at N */
struct EvalNDCGGpu {
public:
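// Per group, DCG@n = sum over the top-n ranked items of (2^label - 1) / log2(rank + 2);
// NDCG divides this by the "ideal" DCG obtained by ranking on the labels themselves.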
static void ComputeDCG(const dh::SegmentSorter<float> &pred_sorter,
const float *dlabels,
const EvalRankConfig &ecfg,
// The order in which labels have to be accessed. The order is determined
// by sorting the predictions or the labels for the entire dataset
const xgboost::common::Span<const uint32_t> &dlabels_sort_order,
dh::caching_device_vector<double> *dcgptr) {
dh::caching_device_vector<double> &dcgs(*dcgptr);
// Group info on device
const auto &dgroups = pred_sorter.GetGroupsSpan();
const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan();
// First, determine non zero labels in the dataset individually
auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) {
return (static_cast<unsigned>(dlabels[dlabels_sort_order[idx]]));
}; // NOLINT
// Find each group's DCG value
const auto nitems = pred_sorter.GetNumItems();
auto *ddcgs = dcgs.data().get();
int device_id = -1;
dh::safe_cuda(hipGetDevice(&device_id));
// For each group item compute the aggregated precision
dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) {
const auto group_idx = dgroup_idx[idx];
const auto group_begin = dgroups[group_idx];
const auto ridx = idx - group_begin;
auto label = DetermineNonTrivialLabelLambda(idx);
if (ridx < ecfg.topn && label) {
atomicAdd(&ddcgs[group_idx], ((1 << label) - 1) / std::log2(ridx + 2.0));
}
});
}
static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter,
const float *dlabels,
const EvalRankConfig &ecfg) {
// Sort the labels and compute IDCG
dh::SegmentSorter<float> segment_label_sorter;
segment_label_sorter.SortItems(dlabels, pred_sorter.GetNumItems(),
pred_sorter.GetGroupSegmentsSpan());
uint32_t ngroups = pred_sorter.GetNumGroups();
dh::caching_device_vector<double> idcg(ngroups, 0);
ComputeDCG(pred_sorter, dlabels, ecfg, segment_label_sorter.GetOriginalPositionsSpan(), &idcg);
// Compute the DCG values next
dh::caching_device_vector<double> dcg(ngroups, 0);
ComputeDCG(pred_sorter, dlabels, ecfg, pred_sorter.GetOriginalPositionsSpan(), &dcg);
double *ddcg = dcg.data().get();
double *didcg = idcg.data().get();
int device_id = -1;
dh::safe_cuda(hipGetDevice(&device_id));
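// Normalize each group's DCG by its ideal DCG (the DCG of the ideally ordered labels).
// Groups with no relevant label (IDCG == 0) score 1 by default, or 0 when ecfg.minus is set.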
// Compute the group's DCG and reduce it across all groups
dh::LaunchN(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) {
if (didcg[gidx] == 0.0f) {
ddcg[gidx] = (ecfg.minus) ? 0.0f : 1.0f;
} else {
ddcg[gidx] /= didcg[gidx];
}
});
// Allocator to be used for managing space overhead while performing reductions
dh::XGBCachingDeviceAllocator<char> alloc;
return thrust::reduce(thrust::hip::par(alloc), dcg.begin(), dcg.end());
}
};
/*! \brief Mean Average Precision at N, for both classification and rank */
struct EvalMAPGpu {
public:
static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter,
const float *dlabels,
const EvalRankConfig &ecfg) {
// Group info on device
const auto &dgroups = pred_sorter.GetGroupsSpan();
const auto ngroups = pred_sorter.GetNumGroups();
const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan();
// Original positions of the predictions after they have been sorted
const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan();
// First, determine non zero labels in the dataset individually
const auto nitems = pred_sorter.GetNumItems();
dh::caching_device_vector<uint32_t> hits(nitems, 0);
auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) {
return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0;
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
hits.begin(),
DetermineNonTrivialLabelLambda);
// Allocator to be used by sort for managing space overhead while performing prefix scans
dh::XGBCachingDeviceAllocator<char> alloc;
// Next, prefix scan the nontrivial labels that are segmented to accumulate them.
// This is required for computing the metric sum
// Data segmented into different groups...
thrust::inclusive_scan_by_key(thrust::hip::par(alloc),
dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx),
hits.begin(), // Input value
hits.begin()); // In-place scan
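// After this scan, hits[idx] holds the cumulative count of relevant items at or before
// position idx within its group, so hits[idx] / (ridx + 1) below is the precision at that rank.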
// Find each group's metric sum
dh::caching_device_vector<double> sumap(ngroups, 0);
auto *dsumap = sumap.data().get();
const auto *dhits = hits.data().get();
int device_id = -1;
dh::safe_cuda(hipGetDevice(&device_id));
// For each group item compute the aggregated precision
dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) {
if (DetermineNonTrivialLabelLambda(idx)) {
const auto group_idx = dgroup_idx[idx];
const auto group_begin = dgroups[group_idx];
const auto ridx = idx - group_begin;
if (ridx < ecfg.topn) {
atomicAdd(&dsumap[group_idx],
static_cast<double>(dhits[idx]) / (ridx + 1));
}
}
});
// Aggregate the group's item precisions
dh::LaunchN(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) {
auto nhits = dgroups[gidx + 1] ? dhits[dgroups[gidx + 1] - 1] : 0;
if (nhits != 0) {
dsumap[gidx] /= nhits;
} else {
if (ecfg.minus) {
dsumap[gidx] = 0;
} else {
dsumap[gidx] = 1;
}
}
});
return thrust::reduce(thrust::hip::par(alloc), sumap.begin(), sumap.end());
}
};
/*! \brief Area Under Curve metric computation for ranking datasets */
struct EvalAucGpu : public Metric {
public:
// This function object computes the positive precision pair for each prediction group
class ComputePosPair : public thrust::unary_function<uint32_t, double> {
public:
XGBOOST_DEVICE ComputePosPair(const double *pred_group_pos_precision,
const double *pred_group_neg_precision,
const double *pred_group_incr_precision)
: pred_group_pos_precision_(pred_group_pos_precision),
pred_group_neg_precision_(pred_group_neg_precision),
pred_group_incr_precision_(pred_group_incr_precision) {}
// Compute positive precision pair for the prediction group at 'idx'
__device__ __forceinline__ double operator()(uint32_t idx) const {
return pred_group_neg_precision_[idx] *
(pred_group_incr_precision_[idx] + pred_group_pos_precision_[idx] * 0.5);
}
private:
// Accumulated positive precision for the prediction group
const double *pred_group_pos_precision_{nullptr};
// Accumulated negative precision for the prediction group
const double *pred_group_neg_precision_{nullptr};
// Incremental positive precision for the prediction group
const double *pred_group_incr_precision_{nullptr};
};
template <typename T>
void ReleaseMemory(dh::caching_device_vector<T> &vec) { // NOLINT
dh::caching_device_vector<T>().swap(vec);
}
bst_float Eval(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
bool distributed) override {
// Sanity check is done by the caller
std::vector<unsigned> tgptr(2, 0);
tgptr[1] = static_cast<unsigned>(info.labels_.Size());
const std::vector<unsigned> &gptr = info.group_ptr_.empty() ? tgptr : info.group_ptr_;
auto device = tparam_->gpu_id;
dh::safe_cuda(hipSetDevice(device));
info.labels_.SetDevice(device);
preds.SetDevice(device);
info.weights_.SetDevice(device);
auto dpreds = preds.ConstDevicePointer();
auto dlabels = info.labels_.ConstDevicePointer();
auto dweights = info.weights_.ConstDevicePointer();
// Sort all the predictions (from one or more groups)
dh::SegmentSorter<float> segment_pred_sorter;
segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr);
const auto &dsorted_preds = segment_pred_sorter.GetItemsSpan();
const auto &dpreds_orig_pos = segment_pred_sorter.GetOriginalPositionsSpan();
// Group info on device
const auto &dgroups = segment_pred_sorter.GetGroupsSpan();
uint32_t ngroups = segment_pred_sorter.GetNumGroups();
// Final values
double hsum_auc = 0.0;
unsigned hauc_error = 0;
int device_id = -1;
dh::safe_cuda(hipGetDevice(&device_id));
// Allocator to be used for managing space overhead while performing reductions
dh::XGBCachingDeviceAllocator<char> alloc;
if (ngroups == 1) {
const auto nitems = segment_pred_sorter.GetNumItems();
// First, segment all the predictions in the group. This is required so that we can
// aggregate the positive and negative precisions within that prediction group
dh::caching_device_vector<unsigned> dpred_segs(nitems, 0);
auto *pred_seg_arr = dpred_segs.data().get();
// This is for getting the next segment number
dh::caching_device_vector<unsigned> seg_idx(1, 0);
auto *seg_idx_ptr = seg_idx.data().get();
dh::caching_device_vector<double> dbuf_pos(nitems, 0);
dh::caching_device_vector<double> dbuf_neg(nitems, 0);
auto *buf_pos_arr = dbuf_pos.data().get();
auto *buf_neg_arr = dbuf_neg.data().get();
dh::LaunchN(device_id, nitems, nullptr, [=] __device__(int idx) {
auto ctr = dlabels[dpreds_orig_pos[idx]];
// For ranking task, weights are per-group
// For binary classification task, weights are per-instance
const auto wt = dweights == nullptr ? 1.0f : dweights[dpreds_orig_pos[idx]];
buf_pos_arr[idx] = ctr * wt;
buf_neg_arr[idx] = (1.0f - ctr) * wt;
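// The thread that owns the last element of a run of tied predictions allocates a new
// segment id and back-fills it across the whole run, so equal predictions share one segment.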
if (idx == nitems - 1 || dsorted_preds[idx] != dsorted_preds[idx + 1]) {
auto new_seg_idx = atomicAdd(seg_idx_ptr, 1);
auto pred_val = dsorted_preds[idx];
do {
pred_seg_arr[idx] = new_seg_idx;
idx--;
} while (idx >= 0 && dsorted_preds[idx] == pred_val);
}
});
auto nunique_preds = seg_idx.back();
ReleaseMemory(seg_idx);
// Next, accumulate the positive and negative precisions for every prediction group
dh::caching_device_vector<double> sum_dbuf_pos(nunique_preds, 0);
auto itr = thrust::reduce_by_key(thrust::hip::par(alloc),
dpred_segs.begin(), dpred_segs.end(), // Segmented by this
dbuf_pos.begin(), // Individual precisions
thrust::make_discard_iterator(), // Ignore unique segments
sum_dbuf_pos.begin()); // Write accumulated results here
ReleaseMemory(dbuf_pos);
CHECK(itr.second - sum_dbuf_pos.begin() == nunique_preds);
dh::caching_device_vector<double> sum_dbuf_neg(nunique_preds, 0);
itr = thrust::reduce_by_key(thrust::hip::par(alloc),
dpred_segs.begin(), dpred_segs.end(),
dbuf_neg.begin(),
thrust::make_discard_iterator(),
sum_dbuf_neg.begin());
ReleaseMemory(dbuf_neg);
ReleaseMemory(dpred_segs);
CHECK(itr.second - sum_dbuf_neg.begin() == nunique_preds);
dh::caching_device_vector<double> sum_nneg(nunique_preds, 0);
thrust::inclusive_scan(thrust::hip::par(alloc),
sum_dbuf_neg.begin(), sum_dbuf_neg.end(),
sum_nneg.begin());
double sum_neg_prec_val = sum_nneg.back();
ReleaseMemory(sum_nneg);
// Find incremental sum for the positive precisions that is then used to
// compute incremental positive precision pair
dh::caching_device_vector<double> sum_npos(nunique_preds + 1, 0);
thrust::inclusive_scan(thrust::hip::par(alloc),
sum_dbuf_pos.begin(), sum_dbuf_pos.end(),
sum_npos.begin() + 1);
double sum_pos_prec_val = sum_npos.back();
if (sum_pos_prec_val <= 0.0 || sum_neg_prec_val <= 0.0) {
hauc_error = 1;
} else {
dh::caching_device_vector<double> sum_pospair(nunique_preds, 0);
// Finally, compute the positive precision pair
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(static_cast<uint32_t>(nunique_preds)),
sum_pospair.begin(),
ComputePosPair(sum_dbuf_pos.data().get(),
sum_dbuf_neg.data().get(),
sum_npos.data().get()));
ReleaseMemory(sum_dbuf_pos);
ReleaseMemory(sum_dbuf_neg);
ReleaseMemory(sum_npos);
hsum_auc = thrust::reduce(thrust::hip::par(alloc),
sum_pospair.begin(), sum_pospair.end())
/ (sum_pos_prec_val * sum_neg_prec_val);
}
} else {
// AUC sum for each group
dh::caching_device_vector<double> sum_auc(ngroups, 0);
// AUC error across all groups
dh::caching_device_vector<int> auc_error(1, 0);
auto *dsum_auc = sum_auc.data().get();
auto *dauc_error = auc_error.data().get();
// Compute the AUC for each group
dh::LaunchN<1, 32>(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) {
double sum_pospair = 0.0, sum_npos = 0.0, sum_nneg = 0.0, buf_pos = 0.0, buf_neg = 0.0;
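// AUC is the fraction of (positive, negative) pairs ranked in the correct order, counting ties as half.
// buf_pos/buf_neg accumulate the weighted positives/negatives of the current run of tied predictions;
// when the run ends, each of its negatives pairs with the positives ranked above it plus half the tied positives.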
for (auto i = dgroups[gidx]; i < dgroups[gidx + 1]; ++i) {
const auto ctr = dlabels[dpreds_orig_pos[i]];
// Keep bucketing predictions in same bucket
if (i != dgroups[gidx] && dsorted_preds[i] != dsorted_preds[i - 1]) {
sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5);
sum_npos += buf_pos;
sum_nneg += buf_neg;
buf_neg = buf_pos = 0.0f;
}
// For ranking task, weights are per-group
// For binary classification task, weights are per-instance
const auto wt = dweights == nullptr ? 1.0f : dweights[gidx];
buf_pos += ctr * wt;
buf_neg += (1.0f - ctr) * wt;
}
sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5);
sum_npos += buf_pos;
sum_nneg += buf_neg;
// Check weird conditions
if (sum_npos <= 0.0 || sum_nneg <= 0.0) {
atomicAdd(dauc_error, 1);
} else {
// This is the AUC
dsum_auc[gidx] = sum_pospair / (sum_npos * sum_nneg);
}
});
hsum_auc = thrust::reduce(thrust::hip::par(alloc), sum_auc.begin(), sum_auc.end());
hauc_error = auc_error.back(); // Copy it back to host
}
// Report average AUC across all groups
// In distributed mode, workers which only contains pos or neg samples
// will be ignored when aggregate AUC.
bst_float dat[2] = {0.0f, 0.0f};
if (hauc_error < ngroups) {
dat[0] = static_cast<bst_float>(hsum_auc);
dat[1] = static_cast<bst_float>(ngroups - hauc_error);
}
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
CHECK_GT(dat[1], 0.0f)
<< "AUC: the dataset only contains pos or neg samples";
return dat[0] / dat[1];
}
const char* Name() const override {
return "auc";
}
};
/*! \brief Area Under PR Curve metric computation for ranking datasets */
struct EvalAucPRGpu : public Metric {
public:
// This function object computes the item's positive/negative precision value
class ComputeItemPrecision : public thrust::unary_function<uint32_t, float> {
public:
// The precision type to be computed
enum class PrecisionType {
kPositive,
kNegative
};
XGBOOST_DEVICE ComputeItemPrecision(PrecisionType ptype,
uint32_t ngroups,
const float *dweights,
const xgboost::common::Span<const uint32_t> &dgidxs,
const float *dlabels)
: ptype_(ptype), ngroups_(ngroups), dweights_(dweights), dgidxs_(dgidxs), dlabels_(dlabels) {}
// Compute precision value for the prediction that was originally at 'idx'
__device__ __forceinline__ float operator()(uint32_t idx) const {
// For ranking task, weights are per-group
// For binary classification task, weights are per-instance
const auto wt = dweights_ == nullptr ? 1.0f : dweights_[ngroups_ == 1 ? idx : dgidxs_[idx]];
return wt * (ptype_ == PrecisionType::kPositive ? dlabels_[idx] : (1.0f - dlabels_[idx]));
}
private:
PrecisionType ptype_; // Precision type to be computed
uint32_t ngroups_; // Number of groups in the dataset
const float *dweights_; // Instance/group weights
const xgboost::common::Span<const uint32_t> dgidxs_; // The group a given instance belongs to
const float *dlabels_; // Unsorted labels in the dataset
};
bst_float Eval(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
bool distributed) override {
// Sanity check is done by the caller
std::vector<unsigned> tgptr(2, 0);
tgptr[1] = static_cast<unsigned>(info.labels_.Size());
const std::vector<unsigned> &gptr = info.group_ptr_.empty() ? tgptr : info.group_ptr_;
auto device = tparam_->gpu_id;
dh::safe_cuda(hipSetDevice(device));
info.labels_.SetDevice(device);
preds.SetDevice(device);
info.weights_.SetDevice(device);
auto dpreds = preds.ConstDevicePointer();
auto dlabels = info.labels_.ConstDevicePointer();
auto dweights = info.weights_.ConstDevicePointer();
// Sort all the predictions
dh::SegmentSorter<float> segment_pred_sorter;
segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr);
const auto &dsorted_preds = segment_pred_sorter.GetItemsSpan();
// Original positions of the predictions after they have been sorted
const auto &dpreds_orig_pos = segment_pred_sorter.GetOriginalPositionsSpan();
// Group info on device
const auto &dgroups = segment_pred_sorter.GetGroupsSpan();
uint32_t ngroups = segment_pred_sorter.GetNumGroups();
const auto &dgroup_idx = segment_pred_sorter.GetGroupSegmentsSpan();
// First, aggregate the positive and negative precision for each group
dh::caching_device_vector<double> total_pos(ngroups, 0);
dh::caching_device_vector<double> total_neg(ngroups, 0);
// Allocator to be used for managing space overhead while performing transformed reductions
dh::XGBCachingDeviceAllocator<char> alloc;
// Compute each elements positive precision value and reduce them across groups concurrently.
ComputeItemPrecision pos_prec_functor(ComputeItemPrecision::PrecisionType::kPositive,
ngroups, dweights, dgroup_idx, dlabels);
auto end_range =
thrust::reduce_by_key(thrust::hip::par(alloc),
dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx),
thrust::make_transform_iterator(
// The indices need not be sequential within a group, as we care only
// about the sum of positive precision values within a group
dh::tcbegin(segment_pred_sorter.GetOriginalPositionsSpan()),
pos_prec_functor),
thrust::make_discard_iterator(), // We don't care for the group indices
total_pos.begin()); // Sum of positive precision values in the group
CHECK(end_range.second - total_pos.begin() == total_pos.size());
// Compute each elements negative precision value and reduce them across groups concurrently.
ComputeItemPrecision neg_prec_functor(ComputeItemPrecision::PrecisionType::kNegative,
ngroups, dweights, dgroup_idx, dlabels);
end_range =
thrust::reduce_by_key(thrust::hip::par(alloc),
dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx),
thrust::make_transform_iterator(
// The indices need not be sequential within a group, as we care only
// about the sum of negative precision values within a group
dh::tcbegin(segment_pred_sorter.GetOriginalPositionsSpan()),
neg_prec_functor),
thrust::make_discard_iterator(), // We don't care for the group indices
total_neg.begin()); // Sum of negative precision values in the group
CHECK(end_range.second - total_neg.begin() == total_neg.size());
const auto *dtotal_pos = total_pos.data().get();
const auto *dtotal_neg = total_neg.data().get();
// AUC sum for each group
dh::caching_device_vector<double> sum_auc(ngroups, 0);
// AUC error across all groups
dh::caching_device_vector<int> auc_error(1, 0);
auto *dsum_auc = sum_auc.data().get();
auto *dauc_error = auc_error.data().get();
int device_id = -1;
dh::safe_cuda(hipGetDevice(&device_id));
// Compute the AUC-PR for each group
dh::LaunchN<1, 32>(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) {
// We need pos > 0 && neg > 0
if (dtotal_pos[gidx] <= 0.0 || dtotal_neg[gidx] <= 0.0) {
atomicAdd(dauc_error, 1);
} else {
auto gbegin = dgroups[gidx];
auto gend = dgroups[gidx + 1];
// Calculate AUC
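// Integrate precision over recall between consecutive distinct predictions; interpolating
// TP and FP linearly between the two endpoints gives the closed-form segment area with the
// log terms below.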
double tp = 0.0, prevtp = 0.0, fp = 0.0, prevfp = 0.0, h = 0.0, a = 0.0, b = 0.0;
for (auto i = gbegin; i < gend; ++i) {
const auto wt = dweights == nullptr ? 1.0f
: dweights[ngroups == 1 ? dpreds_orig_pos[i] : gidx];
tp += wt * dlabels[dpreds_orig_pos[i]];
fp += wt * (1.0f - dlabels[dpreds_orig_pos[i]]);
if ((i < gend - 1 && dsorted_preds[i] != dsorted_preds[i + 1]) || (i == gend - 1)) {
if (tp == prevtp) {
a = 1.0;
b = 0.0;
} else {
h = (fp - prevfp) / (tp - prevtp);
a = 1.0 + h;
b = (prevfp - h * prevtp) / dtotal_pos[gidx];
}
if (0.0 != b) {
dsum_auc[gidx] += (tp / dtotal_pos[gidx] - prevtp / dtotal_pos[gidx] -
b / a * (::log(a * tp / dtotal_pos[gidx] + b) -
::log(a * prevtp / dtotal_pos[gidx] + b))) / a;
} else {
dsum_auc[gidx] += (tp / dtotal_pos[gidx] - prevtp / dtotal_pos[gidx]) / a;
}
prevtp = tp;
prevfp = fp;
}
}
// Sanity check
if (tp < 0 || prevtp < 0 || fp < 0 || prevfp < 0) {
// Check if we have any metric error thus far
auto current_auc_error = atomicAdd(dauc_error, 0);
KERNEL_CHECK(!current_auc_error);
}
}
});
const auto hsum_auc = thrust::reduce(thrust::hip::par(alloc), sum_auc.begin(), sum_auc.end());
const auto hauc_error = auc_error.back(); // Copy it back to host
// Report average AUC-PR across all groups
// In distributed mode, workers which only contains pos or neg samples
// will be ignored when aggregate AUC-PR.
bst_float dat[2] = {0.0f, 0.0f};
if (hauc_error < static_cast<int>(ngroups)) {
dat[0] = static_cast<bst_float>(hsum_auc);
dat[1] = static_cast<bst_float>(static_cast<int>(ngroups) - hauc_error);
}
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
CHECK_GT(dat[1], 0.0f)
<< "AUC-PR: the dataset only contains pos or neg samples";
CHECK_LE(dat[0], dat[1]) << "AUC-PR: AUC > 1.0";
return dat[0] / dat[1];
}
const char* Name() const override {
return "aucpr";
}
};
XGBOOST_REGISTER_GPU_METRIC(AucGpu, "auc")
.describe("Area under curve for rank computed on GPU.")
.set_body([](const char* param) { return new EvalAucGpu(); });
XGBOOST_REGISTER_GPU_METRIC(AucPRGpu, "aucpr")
.describe("Area under PR curve for rank computed on GPU.")
.set_body([](const char* param) { return new EvalAucPRGpu(); });
XGBOOST_REGISTER_GPU_METRIC(PrecisionGpu, "pre")
.describe("precision@k for rank computed on GPU.")
.set_body([](const char* param) { return new EvalRankGpu<EvalPrecisionGpu>("pre", param); });
XGBOOST_REGISTER_GPU_METRIC(NDCGGpu, "ndcg")
.describe("ndcg@k for rank computed on GPU.")
.set_body([](const char* param) { return new EvalRankGpu<EvalNDCGGpu>("ndcg", param); });
XGBOOST_REGISTER_GPU_METRIC(MAPGpu, "map")
.describe("map@k for rank computed on GPU.")
.set_body([](const char* param) { return new EvalRankGpu<EvalMAPGpu>("map", param); });
} // namespace metric
} // namespace xgboost
| 71401e1ba1eec2dd5c64f2c169bac1a104b44656.cu | /*!
* Copyright 2020 by Contributors
* \file rank_metric.cc
* \brief prediction rank based metrics.
* \author Kailong Chen, Tianqi Chen
*/
#include <cmath>
#include <vector>
#include <rabit/rabit.h>
#include <dmlc/registry.h>
#include <xgboost/metric.h>
#include <xgboost/host_device_vector.h>
#include <thrust/iterator/discard_iterator.h>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace metric {
// tag the this file, used by force static link later.
DMLC_REGISTRY_FILE_TAG(rank_metric_gpu);
/*! \brief Evaluate rank list on GPU */
template <typename EvalMetricT>
struct EvalRankGpu : public Metric, public EvalRankConfig {
public:
bst_float Eval(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
bool distributed) override {
// Sanity check is done by the caller
std::vector<unsigned> tgptr(2, 0);
tgptr[1] = static_cast<unsigned>(preds.Size());
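// When no query groups are supplied, the whole dataset is treated as a single group
// covering [0, preds.Size()).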
const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_;
const auto ngroups = static_cast<bst_omp_uint>(gptr.size() - 1);
auto device = tparam_->gpu_id;
dh::safe_cuda(cudaSetDevice(device));
info.labels_.SetDevice(device);
preds.SetDevice(device);
auto dpreds = preds.ConstDevicePointer();
auto dlabels = info.labels_.ConstDevicePointer();
// Sort all the predictions
dh::SegmentSorter<float> segment_pred_sorter;
segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr);
// Compute individual group metric and sum them up
return EvalMetricT::EvalMetric(segment_pred_sorter, dlabels, *this);
}
const char* Name() const override {
return name.c_str();
}
explicit EvalRankGpu(const char* name, const char* param) {
using namespace std; // NOLINT(*)
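// 'param' is the text following '@' in metric names such as "ndcg@10-": the leading
// number is the top-n cutoff, and a trailing '-' sets the 'minus' flag used when
// scoring degenerate groups.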
if (param != nullptr) {
std::ostringstream os;
if (sscanf(param, "%u[-]?", &this->topn) == 1) {
os << name << '@' << param;
this->name = os.str();
} else {
os << name << param;
this->name = os.str();
}
if (param[strlen(param) - 1] == '-') {
this->minus = true;
}
} else {
this->name = name;
}
}
};
/*! \brief Precision at N, for both classification and rank */
struct EvalPrecisionGpu {
public:
static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter,
const float *dlabels,
const EvalRankConfig &ecfg) {
// Group info on device
const auto &dgroups = pred_sorter.GetGroupsSpan();
const auto ngroups = pred_sorter.GetNumGroups();
const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan();
// Original positions of the predictions after they have been sorted
const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan();
// First, determine non zero labels in the dataset individually
auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) {
return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0;
}; // NOLINT
// Find each group's metric sum
dh::caching_device_vector<uint32_t> hits(ngroups, 0);
const auto nitems = pred_sorter.GetNumItems();
auto *dhits = hits.data().get();
int device_id = -1;
dh::safe_cuda(cudaGetDevice(&device_id));
// For each group item compute the aggregated precision
dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) {
const auto group_idx = dgroup_idx[idx];
const auto group_begin = dgroups[group_idx];
const auto ridx = idx - group_begin;
if (ridx < ecfg.topn && DetermineNonTrivialLabelLambda(idx)) {
atomicAdd(&dhits[group_idx], 1);
}
});
// Allocator to be used for managing space overhead while performing reductions
dh::XGBCachingDeviceAllocator<char> alloc;
return static_cast<double>(thrust::reduce(thrust::cuda::par(alloc),
hits.begin(), hits.end())) / ecfg.topn;
}
};
/*! \brief NDCG: Normalized Discounted Cumulative Gain at N */
struct EvalNDCGGpu {
public:
static void ComputeDCG(const dh::SegmentSorter<float> &pred_sorter,
const float *dlabels,
const EvalRankConfig &ecfg,
// The order in which labels have to be accessed. The order is determined
// by sorting the predictions or the labels for the entire dataset
const xgboost::common::Span<const uint32_t> &dlabels_sort_order,
dh::caching_device_vector<double> *dcgptr) {
dh::caching_device_vector<double> &dcgs(*dcgptr);
// Group info on device
const auto &dgroups = pred_sorter.GetGroupsSpan();
const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan();
// First, determine non zero labels in the dataset individually
auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) {
return (static_cast<unsigned>(dlabels[dlabels_sort_order[idx]]));
}; // NOLINT
// Find each group's DCG value
const auto nitems = pred_sorter.GetNumItems();
auto *ddcgs = dcgs.data().get();
int device_id = -1;
dh::safe_cuda(cudaGetDevice(&device_id));
// For each group item compute its DCG contribution
dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) {
const auto group_idx = dgroup_idx[idx];
const auto group_begin = dgroups[group_idx];
const auto ridx = idx - group_begin;
auto label = DetermineNonTrivialLabelLambda(idx);
if (ridx < ecfg.topn && label) {
atomicAdd(&ddcgs[group_idx], ((1 << label) - 1) / std::log2(ridx + 2.0));
}
});
}
static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter,
const float *dlabels,
const EvalRankConfig &ecfg) {
// Sort the labels and compute IDCG
dh::SegmentSorter<float> segment_label_sorter;
segment_label_sorter.SortItems(dlabels, pred_sorter.GetNumItems(),
pred_sorter.GetGroupSegmentsSpan());
uint32_t ngroups = pred_sorter.GetNumGroups();
dh::caching_device_vector<double> idcg(ngroups, 0);
ComputeDCG(pred_sorter, dlabels, ecfg, segment_label_sorter.GetOriginalPositionsSpan(), &idcg);
// Compute the DCG values next
dh::caching_device_vector<double> dcg(ngroups, 0);
ComputeDCG(pred_sorter, dlabels, ecfg, pred_sorter.GetOriginalPositionsSpan(), &dcg);
double *ddcg = dcg.data().get();
double *didcg = idcg.data().get();
int device_id = -1;
dh::safe_cuda(cudaGetDevice(&device_id));
// Compute the group's DCG and reduce it across all groups
dh::LaunchN(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) {
if (didcg[gidx] == 0.0f) {
ddcg[gidx] = (ecfg.minus) ? 0.0f : 1.0f;
} else {
ddcg[gidx] /= didcg[gidx];
}
});
// Allocator to be used for managing space overhead while performing reductions
dh::XGBCachingDeviceAllocator<char> alloc;
return thrust::reduce(thrust::cuda::par(alloc), dcg.begin(), dcg.end());
}
};
/*! \brief Mean Average Precision at N, for both classification and rank */
struct EvalMAPGpu {
public:
static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter,
const float *dlabels,
const EvalRankConfig &ecfg) {
// Group info on device
const auto &dgroups = pred_sorter.GetGroupsSpan();
const auto ngroups = pred_sorter.GetNumGroups();
const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan();
// Original positions of the predictions after they have been sorted
const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan();
// First, determine non zero labels in the dataset individually
const auto nitems = pred_sorter.GetNumItems();
dh::caching_device_vector<uint32_t> hits(nitems, 0);
auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) {
return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0;
}; // NOLINT
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(nitems),
hits.begin(),
DetermineNonTrivialLabelLambda);
// Allocator to be used by sort for managing space overhead while performing prefix scans
dh::XGBCachingDeviceAllocator<char> alloc;
// Next, prefix scan the nontrivial labels that are segmented to accumulate them.
// This is required for computing the metric sum
// Data segmented into different groups...
thrust::inclusive_scan_by_key(thrust::cuda::par(alloc),
dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx),
hits.begin(), // Input value
hits.begin()); // In-place scan
// Find each group's metric sum
dh::caching_device_vector<double> sumap(ngroups, 0);
auto *dsumap = sumap.data().get();
const auto *dhits = hits.data().get();
int device_id = -1;
dh::safe_cuda(cudaGetDevice(&device_id));
// For each group item compute the aggregated precision
dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) {
if (DetermineNonTrivialLabelLambda(idx)) {
const auto group_idx = dgroup_idx[idx];
const auto group_begin = dgroups[group_idx];
const auto ridx = idx - group_begin;
if (ridx < ecfg.topn) {
atomicAdd(&dsumap[group_idx],
static_cast<double>(dhits[idx]) / (ridx + 1));
}
}
});
// Aggregate the group's item precisions
dh::LaunchN(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) {
auto nhits = dgroups[gidx + 1] ? dhits[dgroups[gidx + 1] - 1] : 0;
if (nhits != 0) {
dsumap[gidx] /= nhits;
} else {
if (ecfg.minus) {
dsumap[gidx] = 0;
} else {
dsumap[gidx] = 1;
}
}
});
return thrust::reduce(thrust::cuda::par(alloc), sumap.begin(), sumap.end());
}
};
/*! \brief Area Under Curve metric computation for ranking datasets */
struct EvalAucGpu : public Metric {
public:
// This function object computes the positive precision pair for each prediction group
class ComputePosPair : public thrust::unary_function<uint32_t, double> {
public:
XGBOOST_DEVICE ComputePosPair(const double *pred_group_pos_precision,
const double *pred_group_neg_precision,
const double *pred_group_incr_precision)
: pred_group_pos_precision_(pred_group_pos_precision),
pred_group_neg_precision_(pred_group_neg_precision),
pred_group_incr_precision_(pred_group_incr_precision) {}
// Compute positive precision pair for the prediction group at 'idx'
__device__ __forceinline__ double operator()(uint32_t idx) const {
return pred_group_neg_precision_[idx] *
(pred_group_incr_precision_[idx] + pred_group_pos_precision_[idx] * 0.5);
}
private:
// Accumulated positive precision for the prediction group
const double *pred_group_pos_precision_{nullptr};
// Accumulated negative precision for the prediction group
const double *pred_group_neg_precision_{nullptr};
// Incremental positive precision for the prediction group
const double *pred_group_incr_precision_{nullptr};
};
template <typename T>
void ReleaseMemory(dh::caching_device_vector<T> &vec) { // NOLINT
dh::caching_device_vector<T>().swap(vec);
}
bst_float Eval(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
bool distributed) override {
// Sanity check is done by the caller
std::vector<unsigned> tgptr(2, 0);
tgptr[1] = static_cast<unsigned>(info.labels_.Size());
const std::vector<unsigned> &gptr = info.group_ptr_.empty() ? tgptr : info.group_ptr_;
auto device = tparam_->gpu_id;
dh::safe_cuda(cudaSetDevice(device));
info.labels_.SetDevice(device);
preds.SetDevice(device);
info.weights_.SetDevice(device);
auto dpreds = preds.ConstDevicePointer();
auto dlabels = info.labels_.ConstDevicePointer();
auto dweights = info.weights_.ConstDevicePointer();
// Sort all the predictions (from one or more groups)
dh::SegmentSorter<float> segment_pred_sorter;
segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr);
const auto &dsorted_preds = segment_pred_sorter.GetItemsSpan();
const auto &dpreds_orig_pos = segment_pred_sorter.GetOriginalPositionsSpan();
// Group info on device
const auto &dgroups = segment_pred_sorter.GetGroupsSpan();
uint32_t ngroups = segment_pred_sorter.GetNumGroups();
// Final values
double hsum_auc = 0.0;
unsigned hauc_error = 0;
int device_id = -1;
dh::safe_cuda(cudaGetDevice(&device_id));
// Allocator to be used for managing space overhead while performing reductions
dh::XGBCachingDeviceAllocator<char> alloc;
if (ngroups == 1) {
const auto nitems = segment_pred_sorter.GetNumItems();
// First, segment all the predictions in the group. This is required so that we can
// aggregate the positive and negative precisions within that prediction group
dh::caching_device_vector<unsigned> dpred_segs(nitems, 0);
auto *pred_seg_arr = dpred_segs.data().get();
// This is for getting the next segment number
dh::caching_device_vector<unsigned> seg_idx(1, 0);
auto *seg_idx_ptr = seg_idx.data().get();
dh::caching_device_vector<double> dbuf_pos(nitems, 0);
dh::caching_device_vector<double> dbuf_neg(nitems, 0);
auto *buf_pos_arr = dbuf_pos.data().get();
auto *buf_neg_arr = dbuf_neg.data().get();
dh::LaunchN(device_id, nitems, nullptr, [=] __device__(int idx) {
auto ctr = dlabels[dpreds_orig_pos[idx]];
// For ranking task, weights are per-group
// For binary classification task, weights are per-instance
const auto wt = dweights == nullptr ? 1.0f : dweights[dpreds_orig_pos[idx]];
buf_pos_arr[idx] = ctr * wt;
buf_neg_arr[idx] = (1.0f - ctr) * wt;
if (idx == nitems - 1 || dsorted_preds[idx] != dsorted_preds[idx + 1]) {
auto new_seg_idx = atomicAdd(seg_idx_ptr, 1);
auto pred_val = dsorted_preds[idx];
do {
pred_seg_arr[idx] = new_seg_idx;
idx--;
} while (idx >= 0 && dsorted_preds[idx] == pred_val);
}
});
auto nunique_preds = seg_idx.back();
ReleaseMemory(seg_idx);
// Next, accumulate the positive and negative precisions for every prediction group
dh::caching_device_vector<double> sum_dbuf_pos(nunique_preds, 0);
auto itr = thrust::reduce_by_key(thrust::cuda::par(alloc),
dpred_segs.begin(), dpred_segs.end(), // Segmented by this
dbuf_pos.begin(), // Individual precisions
thrust::make_discard_iterator(), // Ignore unique segments
sum_dbuf_pos.begin()); // Write accumulated results here
ReleaseMemory(dbuf_pos);
CHECK(itr.second - sum_dbuf_pos.begin() == nunique_preds);
dh::caching_device_vector<double> sum_dbuf_neg(nunique_preds, 0);
itr = thrust::reduce_by_key(thrust::cuda::par(alloc),
dpred_segs.begin(), dpred_segs.end(),
dbuf_neg.begin(),
thrust::make_discard_iterator(),
sum_dbuf_neg.begin());
ReleaseMemory(dbuf_neg);
ReleaseMemory(dpred_segs);
CHECK(itr.second - sum_dbuf_neg.begin() == nunique_preds);
dh::caching_device_vector<double> sum_nneg(nunique_preds, 0);
thrust::inclusive_scan(thrust::cuda::par(alloc),
sum_dbuf_neg.begin(), sum_dbuf_neg.end(),
sum_nneg.begin());
double sum_neg_prec_val = sum_nneg.back();
ReleaseMemory(sum_nneg);
// Find incremental sum for the positive precisions that is then used to
// compute incremental positive precision pair
dh::caching_device_vector<double> sum_npos(nunique_preds + 1, 0);
thrust::inclusive_scan(thrust::cuda::par(alloc),
sum_dbuf_pos.begin(), sum_dbuf_pos.end(),
sum_npos.begin() + 1);
double sum_pos_prec_val = sum_npos.back();
if (sum_pos_prec_val <= 0.0 || sum_neg_prec_val <= 0.0) {
hauc_error = 1;
} else {
dh::caching_device_vector<double> sum_pospair(nunique_preds, 0);
// Finally, compute the positive precision pair
thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)),
thrust::make_counting_iterator(static_cast<uint32_t>(nunique_preds)),
sum_pospair.begin(),
ComputePosPair(sum_dbuf_pos.data().get(),
sum_dbuf_neg.data().get(),
sum_npos.data().get()));
ReleaseMemory(sum_dbuf_pos);
ReleaseMemory(sum_dbuf_neg);
ReleaseMemory(sum_npos);
hsum_auc = thrust::reduce(thrust::cuda::par(alloc),
sum_pospair.begin(), sum_pospair.end())
/ (sum_pos_prec_val * sum_neg_prec_val);
}
} else {
// AUC sum for each group
dh::caching_device_vector<double> sum_auc(ngroups, 0);
// AUC error across all groups
dh::caching_device_vector<int> auc_error(1, 0);
auto *dsum_auc = sum_auc.data().get();
auto *dauc_error = auc_error.data().get();
// Compute the AUC for each group
dh::LaunchN<1, 32>(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) {
double sum_pospair = 0.0, sum_npos = 0.0, sum_nneg = 0.0, buf_pos = 0.0, buf_neg = 0.0;
for (auto i = dgroups[gidx]; i < dgroups[gidx + 1]; ++i) {
const auto ctr = dlabels[dpreds_orig_pos[i]];
// Keep bucketing predictions in same bucket
if (i != dgroups[gidx] && dsorted_preds[i] != dsorted_preds[i - 1]) {
sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5);
sum_npos += buf_pos;
sum_nneg += buf_neg;
buf_neg = buf_pos = 0.0f;
}
// For ranking task, weights are per-group
// For binary classification task, weights are per-instance
const auto wt = dweights == nullptr ? 1.0f : dweights[gidx];
buf_pos += ctr * wt;
buf_neg += (1.0f - ctr) * wt;
}
sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5);
sum_npos += buf_pos;
sum_nneg += buf_neg;
// Check weird conditions
if (sum_npos <= 0.0 || sum_nneg <= 0.0) {
atomicAdd(dauc_error, 1);
} else {
// This is the AUC
dsum_auc[gidx] = sum_pospair / (sum_npos * sum_nneg);
}
});
hsum_auc = thrust::reduce(thrust::cuda::par(alloc), sum_auc.begin(), sum_auc.end());
hauc_error = auc_error.back(); // Copy it back to host
}
// Report average AUC across all groups
// In distributed mode, workers which only contains pos or neg samples
// will be ignored when aggregate AUC.
bst_float dat[2] = {0.0f, 0.0f};
if (hauc_error < ngroups) {
dat[0] = static_cast<bst_float>(hsum_auc);
dat[1] = static_cast<bst_float>(ngroups - hauc_error);
}
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
CHECK_GT(dat[1], 0.0f)
<< "AUC: the dataset only contains pos or neg samples";
return dat[0] / dat[1];
}
const char* Name() const override {
return "auc";
}
};
/*! \brief Area Under PR Curve metric computation for ranking datasets */
struct EvalAucPRGpu : public Metric {
public:
// This function object computes the item's positive/negative precision value
class ComputeItemPrecision : public thrust::unary_function<uint32_t, float> {
public:
// The precision type to be computed
enum class PrecisionType {
kPositive,
kNegative
};
XGBOOST_DEVICE ComputeItemPrecision(PrecisionType ptype,
uint32_t ngroups,
const float *dweights,
const xgboost::common::Span<const uint32_t> &dgidxs,
const float *dlabels)
: ptype_(ptype), ngroups_(ngroups), dweights_(dweights), dgidxs_(dgidxs), dlabels_(dlabels) {}
// Compute precision value for the prediction that was originally at 'idx'
__device__ __forceinline__ float operator()(uint32_t idx) const {
// For ranking task, weights are per-group
// For binary classification task, weights are per-instance
const auto wt = dweights_ == nullptr ? 1.0f : dweights_[ngroups_ == 1 ? idx : dgidxs_[idx]];
return wt * (ptype_ == PrecisionType::kPositive ? dlabels_[idx] : (1.0f - dlabels_[idx]));
}
private:
PrecisionType ptype_; // Precision type to be computed
uint32_t ngroups_; // Number of groups in the dataset
const float *dweights_; // Instance/group weights
const xgboost::common::Span<const uint32_t> dgidxs_; // The group a given instance belongs to
const float *dlabels_; // Unsorted labels in the dataset
};
bst_float Eval(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
bool distributed) override {
// Sanity check is done by the caller
std::vector<unsigned> tgptr(2, 0);
tgptr[1] = static_cast<unsigned>(info.labels_.Size());
const std::vector<unsigned> &gptr = info.group_ptr_.empty() ? tgptr : info.group_ptr_;
auto device = tparam_->gpu_id;
dh::safe_cuda(cudaSetDevice(device));
info.labels_.SetDevice(device);
preds.SetDevice(device);
info.weights_.SetDevice(device);
auto dpreds = preds.ConstDevicePointer();
auto dlabels = info.labels_.ConstDevicePointer();
auto dweights = info.weights_.ConstDevicePointer();
// Sort all the predictions
dh::SegmentSorter<float> segment_pred_sorter;
segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr);
const auto &dsorted_preds = segment_pred_sorter.GetItemsSpan();
// Original positions of the predictions after they have been sorted
const auto &dpreds_orig_pos = segment_pred_sorter.GetOriginalPositionsSpan();
// Group info on device
const auto &dgroups = segment_pred_sorter.GetGroupsSpan();
uint32_t ngroups = segment_pred_sorter.GetNumGroups();
const auto &dgroup_idx = segment_pred_sorter.GetGroupSegmentsSpan();
// First, aggregate the positive and negative precision for each group
dh::caching_device_vector<double> total_pos(ngroups, 0);
dh::caching_device_vector<double> total_neg(ngroups, 0);
// Allocator to be used for managing space overhead while performing transformed reductions
dh::XGBCachingDeviceAllocator<char> alloc;
// Compute each elements positive precision value and reduce them across groups concurrently.
ComputeItemPrecision pos_prec_functor(ComputeItemPrecision::PrecisionType::kPositive,
ngroups, dweights, dgroup_idx, dlabels);
auto end_range =
thrust::reduce_by_key(thrust::cuda::par(alloc),
dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx),
thrust::make_transform_iterator(
// The indices need not be sequential within a group, as we care only
// about the sum of positive precision values within a group
dh::tcbegin(segment_pred_sorter.GetOriginalPositionsSpan()),
pos_prec_functor),
thrust::make_discard_iterator(), // We don't care for the group indices
total_pos.begin()); // Sum of positive precision values in the group
CHECK(end_range.second - total_pos.begin() == total_pos.size());
// Compute each elements negative precision value and reduce them across groups concurrently.
ComputeItemPrecision neg_prec_functor(ComputeItemPrecision::PrecisionType::kNegative,
ngroups, dweights, dgroup_idx, dlabels);
end_range =
thrust::reduce_by_key(thrust::cuda::par(alloc),
dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx),
thrust::make_transform_iterator(
// The indices need not be sequential within a group, as we care only
// about the sum of negative precision values within a group
dh::tcbegin(segment_pred_sorter.GetOriginalPositionsSpan()),
neg_prec_functor),
thrust::make_discard_iterator(), // We don't care for the group indices
total_neg.begin()); // Sum of negative precision values in the group
CHECK(end_range.second - total_neg.begin() == total_neg.size());
const auto *dtotal_pos = total_pos.data().get();
const auto *dtotal_neg = total_neg.data().get();
// AUC sum for each group
dh::caching_device_vector<double> sum_auc(ngroups, 0);
// AUC error across all groups
dh::caching_device_vector<int> auc_error(1, 0);
auto *dsum_auc = sum_auc.data().get();
auto *dauc_error = auc_error.data().get();
int device_id = -1;
dh::safe_cuda(cudaGetDevice(&device_id));
// Compute the AUC-PR for each group
dh::LaunchN<1, 32>(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) {
// We need pos > 0 && neg > 0
if (dtotal_pos[gidx] <= 0.0 || dtotal_neg[gidx] <= 0.0) {
atomicAdd(dauc_error, 1);
} else {
auto gbegin = dgroups[gidx];
auto gend = dgroups[gidx + 1];
// Calculate AUC
double tp = 0.0, prevtp = 0.0, fp = 0.0, prevfp = 0.0, h = 0.0, a = 0.0, b = 0.0;
for (auto i = gbegin; i < gend; ++i) {
const auto wt = dweights == nullptr ? 1.0f
: dweights[ngroups == 1 ? dpreds_orig_pos[i] : gidx];
tp += wt * dlabels[dpreds_orig_pos[i]];
fp += wt * (1.0f - dlabels[dpreds_orig_pos[i]]);
if ((i < gend - 1 && dsorted_preds[i] != dsorted_preds[i + 1]) || (i == gend - 1)) {
if (tp == prevtp) {
a = 1.0;
b = 0.0;
} else {
h = (fp - prevfp) / (tp - prevtp);
a = 1.0 + h;
b = (prevfp - h * prevtp) / dtotal_pos[gidx];
}
if (0.0 != b) {
dsum_auc[gidx] += (tp / dtotal_pos[gidx] - prevtp / dtotal_pos[gidx] -
b / a * (std::log(a * tp / dtotal_pos[gidx] + b) -
std::log(a * prevtp / dtotal_pos[gidx] + b))) / a;
} else {
dsum_auc[gidx] += (tp / dtotal_pos[gidx] - prevtp / dtotal_pos[gidx]) / a;
}
prevtp = tp;
prevfp = fp;
}
}
// Sanity check
if (tp < 0 || prevtp < 0 || fp < 0 || prevfp < 0) {
// Check if we have any metric error thus far
auto current_auc_error = atomicAdd(dauc_error, 0);
KERNEL_CHECK(!current_auc_error);
}
}
});
const auto hsum_auc = thrust::reduce(thrust::cuda::par(alloc), sum_auc.begin(), sum_auc.end());
const auto hauc_error = auc_error.back(); // Copy it back to host
// Report average AUC-PR across all groups
// In distributed mode, workers which only contains pos or neg samples
// will be ignored when aggregate AUC-PR.
bst_float dat[2] = {0.0f, 0.0f};
if (hauc_error < static_cast<int>(ngroups)) {
dat[0] = static_cast<bst_float>(hsum_auc);
dat[1] = static_cast<bst_float>(static_cast<int>(ngroups) - hauc_error);
}
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
CHECK_GT(dat[1], 0.0f)
<< "AUC-PR: the dataset only contains pos or neg samples";
CHECK_LE(dat[0], dat[1]) << "AUC-PR: AUC > 1.0";
return dat[0] / dat[1];
}
const char* Name() const override {
return "aucpr";
}
};
XGBOOST_REGISTER_GPU_METRIC(AucGpu, "auc")
.describe("Area under curve for rank computed on GPU.")
.set_body([](const char* param) { return new EvalAucGpu(); });
XGBOOST_REGISTER_GPU_METRIC(AucPRGpu, "aucpr")
.describe("Area under PR curve for rank computed on GPU.")
.set_body([](const char* param) { return new EvalAucPRGpu(); });
XGBOOST_REGISTER_GPU_METRIC(PrecisionGpu, "pre")
.describe("precision@k for rank computed on GPU.")
.set_body([](const char* param) { return new EvalRankGpu<EvalPrecisionGpu>("pre", param); });
XGBOOST_REGISTER_GPU_METRIC(NDCGGpu, "ndcg")
.describe("ndcg@k for rank computed on GPU.")
.set_body([](const char* param) { return new EvalRankGpu<EvalNDCGGpu>("ndcg", param); });
XGBOOST_REGISTER_GPU_METRIC(MAPGpu, "map")
.describe("map@k for rank computed on GPU.")
.set_body([](const char* param) { return new EvalRankGpu<EvalMAPGpu>("map", param); });
} // namespace metric
} // namespace xgboost
|
d6a1c651e4653fe3988e5b207a68b0fe07f7268a.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Tingxing Dong
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_d
#include "gemv_template_kernel_batched_hip.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
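// version(N, 135) expands to an identifier such as N_V_135, one of the tuned kernel
// configurations declared in the gemv_config headers; the dispatch below selects a
// variant based on the transpose mode and the matrix shape.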
/***************************************************************************//**
Purpose
-------
DGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of DIMENSION ( ldda, n ) on the GPU
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE PRECISION
On entry, ALPHA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv_batched
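Example
-------
A minimal call sketch, assuming dA_array, dx_array and dy_array are device arrays
holding batchCount device pointers and queue is an existing magma_queue_t; a
non-transposed batched GEMV then reads

    magmablas_dgemv_batched( MagmaNoTrans, m, n,
                             alpha, dA_array, ldda,
                                    dx_array, 1,
                             beta,  dy_array, 1,
                             batchCount, queue );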
*******************************************************************************/
extern "C" void
magmablas_dgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dA_array[], magma_int_t ldda,
magmaDouble_ptr dx_array[], magma_int_t incx,
double beta,
magmaDouble_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( trans == MagmaNoTrans ) {
if (max(m, n) <= 96) { // small size
if (m < n) { // Fat or square matrix
if ( m <= 16)
{
gemvn_template_batched<double, version(N, 72)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 32)
{
gemvn_template_batched<double, version(N, 100)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 64)
{
gemvn_template_batched<double, version(N, 122)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // Tall or square matrix
if ( n <= 16)
{
gemvn_template_batched<double, version(N, 128)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( n <= 64)
{
gemvn_template_batched<double, version(N, 132)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
else { // big size
if (m < n) { // Fat matrix
if (m <= 8)
{
gemvn_template_batched<double, version(N, 79)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 16)
{
gemvn_template_batched<double, version(N, 70)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<double, version(N, 104)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 64)
{
gemvn_template_batched<double, version(N, 124)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // (m > n) Tall matrix
if (m <= 256)
{
gemvn_template_batched<double, version(N, 137)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 140)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}// big size
}
else {
if (max(m, n) <= 96) { // small size
if (m <= 16)
{
gemvc_template_batched<double, version(T, 42)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // big size
if (m <= n) { // Fat or square matrix
if (m <= 64)
{
gemvc_template_batched<double, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 91)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // (m > n) Tall matrix
if (n <= 64)
{
gemvc_template_batched<double, version(T, 90)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 91)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
}
}
| d6a1c651e4653fe3988e5b207a68b0fe07f7268a.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Tingxing Dong
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_d
#include "gemv_template_kernel_batched.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/***************************************************************************//**
Purpose
-------
DGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha DOUBLE PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of DIMENSION ( ldda, n ) on the GPU
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta DOUBLE PRECISION
On entry, ALPHA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv_batched
*******************************************************************************/
extern "C" void
magmablas_dgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dA_array[], magma_int_t ldda,
magmaDouble_ptr dx_array[], magma_int_t incx,
double beta,
magmaDouble_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( trans == MagmaNoTrans ) {
if (max(m, n) <= 96) { // small size
if (m < n) { // Fat or square matrix
if ( m <= 16)
{
gemvn_template_batched<double, version(N, 72)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 32)
{
gemvn_template_batched<double, version(N, 100)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 64)
{
gemvn_template_batched<double, version(N, 122)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // Tall or square matrix
if ( n <= 16)
{
gemvn_template_batched<double, version(N, 128)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( n <= 64)
{
gemvn_template_batched<double, version(N, 132)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
else { // big size
if (m < n) { // Fat matrix
if (m <= 8)
{
gemvn_template_batched<double, version(N, 79)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 16)
{
gemvn_template_batched<double, version(N, 70)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<double, version(N, 104)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 64)
{
gemvn_template_batched<double, version(N, 124)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // (m > n) Tall matrix
if (m <= 256)
{
gemvn_template_batched<double, version(N, 137)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<double, version(N, 140)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}// big size
}
else {
if (max(m, n) <= 96) { // small size
if (m <= 16)
{
gemvc_template_batched<double, version(T, 42)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // big size
if (m <= n) { // Fat or square matrix
if (m <= 64)
{
gemvc_template_batched<double, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 91)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
} else { // (m > n) Tall matrix
if (n <= 64)
{
gemvc_template_batched<double, version(T, 90)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<double, version(T, 91)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
}
}
|
77cc1800f2802554051ceb503eb27d23fba87fce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int r = threadIdx.x;
int c = blockIdx.x;
int idx = r * numCols + c;
  float f = .299f * rgbaImage[idx].x + 0.587f * rgbaImage[idx].y + 0.114f * rgbaImage[idx].z;
greyImage[idx] = (unsigned char) f;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numRows, 1, 1); //TODO
const dim3 gridSize(numCols, 1); //TODO
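  //Note: blockSize(numRows, 1, 1) only works while numRows stays within the
  //device's maximum threads per block (typically 1024). For larger images a
  //2D tiling is the usual choice (sketch only, not used here):
  //  const dim3 blockSize(16, 16, 1);
  //  const dim3 gridSize((numCols + 15) / 16, (numRows + 15) / 16, 1);
  //with the kernel deriving the pixel row/column from blockIdx * blockDim +
  //threadIdx and skipping out-of-range pixels.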
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 77cc1800f2802554051ceb503eb27d23fba87fce.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int r = threadIdx.x;
int c = blockIdx.x;
int idx = r * numCols + c;
  float f = .299f * rgbaImage[idx].x + 0.587f * rgbaImage[idx].y + 0.114f * rgbaImage[idx].z;
greyImage[idx] = (unsigned char) f;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numRows, 1, 1); //TODO
const dim3 gridSize(numCols, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
7b926266b22654fd1721bd365ea32dead5bca9ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
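// Illustrative variant, not called by main() below: a grid-stride loop lets
// any launch configuration cover all n elements instead of relying on a
// single thread to walk the whole array.
__global__
void add_grid_stride(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}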
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = fmax(maxError, fabs(y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 7b926266b22654fd1721bd365ea32dead5bca9ba.cu | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
add<<<1, 1>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = fmax(maxError, fabs(y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
7c9d3ad0a65c7e2adc4bda5a610467f14775659d.hip | // !!! This is a file automatically generated by hipify!!!
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
extern "C" {
#include "lab4.h"
}
#define COEFFS(Cu,Cv,u,v) {\
if (u==0) Cu = 1/sqrt(2); else Cu = 1.0;\
if (v==0) Cv = 1/sqrt(2); else Cv = 1.0;}
//Function prototypes
void Traverse(char *block, char *arr, int col);
void Inverse(char *block, char *arr, int col);
//DCT Kernel
__global__ void parallel_dct(int *ky, int *kcb, int *kcr, double *kdy,
double *kdcb, double *kdcr, int row, int col)
{
int idx = (threadIdx.x + blockIdx.x * blockDim.x)*8;
int idy = (threadIdx.y + blockIdx.y * blockDim.y)*8;
int i,j,x;
if (idx < row && idy < col)
{
double temp_dcty[64];
double temp_dctcb[64];
double temp_dctcr[64];
double DCT[8][8] = {0.3536, 0.3536, 0.3536, 0.3536, 0.3536, 0.3536, 0.3536, 0.3536,
0.4904, 0.4157, 0.2778, 0.0975, -0.0975, -0.2778, -0.4157, -0.4904,
0.4619, 0.1913, -0.1913, -0.4619, -0.4619, -0.1913, 0.1913, 0.4619,
0.4157, -0.0975, -0.4904, -0.2778, 0.2778, 0.4904, 0.0975, -0.4157,
0.3536, -0.3536, -0.3536, 0.3536, 0.3536, -0.3536, -0.3536, 0.3536,
0.2778, -0.4904, 0.0975, 0.4157, -0.4157, -0.0975, 0.4904, -0.2778,
0.1913, -0.4619, 0.4619, -0.1913, -0.1913, 0.4619, -0.4619, 0.1913,
                            0.0975, -0.2778, 0.4157, -0.4904, 0.4904, -0.4157, 0.2778, -0.0975};
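        // The matrix above is the orthonormal 8-point DCT-II basis (entries
        // rounded to four decimals). The two loop nests below apply it as a
        // separable 2D transform on the 8x8 block: temp = DCT * block, then
        // output = temp * DCT^T, done independently for the Y, Cb and Cr planes.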
for(i = 0; i < 8; i++)
for(j = 0; j < 8; j++)
{
temp_dcty[i*8+j] = 0;
temp_dctcb[i*8+j] = 0;
temp_dctcr[i*8+j] = 0;
for(x = 0; x < 8; x++)
{
temp_dcty[i*8+j] += DCT[i][x] * (double)ky[(idx+x) * col + (idy+j)];
temp_dctcb[i*8+j] += DCT[i][x] * (double)kcb[(idx+x) * col + (idy+j)];
temp_dctcr[i*8+j] += DCT[i][x] * (double)kcr[(idx+x) * col + (idy+j)];
}
}
for(i = 0; i < 8; i++)
for(j = 0; j < 8; j++)
{
kdy[(idx+i) * col + (idy+j)] = 0;
kdcb[(idx+i) * col + (idy+j)] = 0;
kdcr[(idx+i) * col + (idy+j)] = 0;
for(x = 0; x < 8; x++)
{
kdy[(idx+i) * col + (idy+j)] += temp_dcty[i*8+x] * DCT[j][x];
kdcb[(idx+i) * col + (idy+j)] += temp_dctcb[i*8+x] * DCT[j][x];
kdcr[(idx+i) * col + (idy+j)] += temp_dctcr[i*8+x] * DCT[j][x];
}
}
}
}
//IDCT Kernel
__global__ void parallel_idct(int *ky, int *kcb, int *kcr, double *kdy,
double *kdcb, double *kdcr, int row, int col)
{
int idx = (threadIdx.x + blockIdx.x * blockDim.x)*8;
int idy = (threadIdx.y + blockIdx.y * blockDim.y)*8;
if (idx < row && idy < col)
{
double temp_dcty[64];
double temp_dctcb[64];
double temp_dctcr[64];
int i,j,x;
double IDCT[8][8] = {0.3535, 0.4905, 0.4620, 0.4157, 0.3535, 0.2778, 0.1914, 0.0975,
0.3536, 0.4156, 0.1914, -0.0975, -0.3536, -0.4903, -0.4621, -0.2777,
0.3534, 0.2780, -0.1914, -0.4905, -0.3534, 0.0973, 0.4622, 0.4156,
0.3537, 0.0973, -0.4619, -0.2778, 0.3533, 0.4160, -0.1916, -0.4903,
0.3533, -0.0973, -0.4621, 0.2778, 0.3537, -0.4160, -0.1911, 0.4903,
0.3537, -0.2780, -0.1913, 0.4905, -0.3537, -0.0973, 0.4618, -0.4156,
0.3534, -0.4156, 0.1913, 0.0975, -0.3534, 0.4903, -0.4619, 0.2777,
0.3535, -0.4905, 0.4620, -0.4157, 0.3535, -0.2778, 0.1913, -0.0975};
for(i = 0; i < 8; i++)
for(j = 0; j < 8; j++)
{
temp_dcty[i*8+j] = 0;
temp_dctcb[i*8+j] = 0;
temp_dctcr[i*8+j] = 0;
for(x = 0; x < 8; x++)
{
temp_dcty[i*8+j] += IDCT[i][x] * kdy[(idx+x)*col + (idy+j)];
temp_dctcb[i*8+j] += IDCT[i][x] * kdcb[(idx+x)*col + (idy+j)];
temp_dctcr[i*8+j] += IDCT[i][x] * kdcr[(idx+x)*col + (idy+j)];
}
}
for(i = 0; i < 8; i++)
for(j = 0; j < 8; j++)
{
ky[(idx+i) * col + (idy+j)] = 0;
kcb[(idx+i) * col + (idy+j)] = 0;
kcr[(idx+i) * col + (idy+j)] = 0;
for(x = 0; x < 8; x++)
{
ky[(idx+i) * col + (idy+j)] += (int)(temp_dcty[i*8+x] * IDCT[j][x]);
kcb[(idx+i) * col + (idy+j)] += (int)(temp_dctcb[i*8+x] * IDCT[j][x]);
kcr[(idx+i) * col + (idy+j)] += (int)(temp_dctcr[i*8+x] * IDCT[j][x]);
}
}
}
}
int main (int argc, char **argv)
{
//Timing variables
struct timeval startrgb, endrgb, startdct, enddct, startquant, endquant, starthuff, endhuff, startcmptot, endcmptot;
struct timeval startirgb, endirgb, startidct, endidct, startiquant, endiquant, startihuff, endihuff, startdectot, enddectot;
struct timeval starttot, endtot;
gettimeofday(&starttot, NULL);
gettimeofday(&startcmptot, NULL);
//Declaration of variables
FILE *f_in = fopen(argv[1], "r");
FILE *f_out = fopen("output.ppm", "w");
char img_type[16];
int row, col, char_val, orig_row, orig_col;
int c, i, j, k, m, n;
int counter = 0;
//double temp_y, temp_cb, temp_cr;
//double Ci, Cj;
//Quantization Matrix
unsigned char Q[8][8] = { 16, 11, 10, 16, 24, 40, 51, 61,
12, 12, 14, 19, 26, 58, 60, 55,
14, 13, 16, 24, 40, 57, 69, 56,
14, 17, 22, 29, 51, 87, 80, 62,
18, 22, 37, 56, 68, 109, 103, 77,
24, 35, 55, 64, 81, 104, 113, 92,
49, 64, 78, 87, 103, 121, 120, 101,
72, 92, 95, 98, 112, 100, 103, 99};
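  //This is the standard JPEG luminance quantization table (ISO/IEC 10918-1,
  //Annex K); the larger divisors toward the high-frequency corner discard
  //progressively more detail.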
//Parse header
fscanf(f_in, "%s\n", img_type);
fscanf(f_in, "%d %d\n", &orig_col, &orig_row);
fscanf(f_in, "%d\n", &char_val);
//Pad row and col if necessary
col = orig_col;
row = orig_row;
if(orig_col % 8 != 0)
col = orig_col + (8-(orig_col % 8));
if(orig_row % 8 != 0)
row = orig_row + (8-(orig_row % 8));
int img_size = col*row;
//Full RGB matrix
unsigned char *img_c = (unsigned char *)calloc(row*col*3, sizeof(unsigned char));
//Separate YCbCr matrices
int *img_y = (int *)calloc(img_size*2, sizeof(int));
int *img_cb = (int *)calloc(img_size*2, sizeof(int));
int *img_cr = (int *)calloc(img_size*2, sizeof(int));
//Kernel YCbCr matrices
int *ky, *kcb, *kcr;
hipMalloc((void **)&ky, img_size*2*sizeof(int));
hipMalloc((void **)&kcb, img_size*2*sizeof(int));
hipMalloc((void **)&kcr, img_size*2*sizeof(int));
//Discrete cosine transform matrices
double *img_dy = (double *)calloc(img_size*2, sizeof(double));
double *img_dcb = (double *)calloc(img_size*2, sizeof(double));
double *img_dcr = (double *)calloc(img_size*2, sizeof(double));
//Kernel DCT matrices
double *kdy, *kdcb, *kdcr;
hipMalloc((void **)&kdy, img_size*2*sizeof(double));
hipMalloc((void **)&kdcb, img_size*2*sizeof(double));
hipMalloc((void **)&kdcr, img_size*2*sizeof(double));
//Quantization matrices
char *img_qy = (char *)calloc(img_size*2, sizeof(char));
char *img_qcb = (char *)calloc(img_size*2, sizeof(char));
char *img_qcr = (char *)calloc(img_size*2, sizeof(char));
//Temp arrays for Traverse()
char *trav_arr_qy = (char *)calloc(64, sizeof(char));
char *trav_arr_qcb = (char *)calloc(64, sizeof(char));
char *trav_arr_qcr = (char *)calloc(64, sizeof(char));
//Temp arrays for DCT matrix mult
// double *temp_dcty = (double *)calloc(64, sizeof(double));
// double *temp_dctcb = (double *)calloc(64, sizeof(double));
// double *temp_dctcr = (double *)calloc(64, sizeof(double));
//Rearranged matrix for huffman (1D)
char *huff = (char *)calloc(row*col*3, sizeof(char));
//Read in pixel data
for(i=0; i<orig_row; i++)
for(j=0; j<orig_col*3; j++)
fscanf(f_in, "%c", &img_c[i*col*3 + j]);
// for(m=0;m<64;m++){
// if(m%8==0)
// printf("\n");
// printf("%6d",img_c[m]);
// }
  //RGB -> YCbCr
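  //The coefficients below are the full-range JFIF (BT.601) conversion:
  //Y = 0.299 R + 0.587 G + 0.114 B, with Cb and Cr offset by 128.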
gettimeofday(&startrgb, NULL);
for(i=0; i<row; i++)
for(j=0,k=0; j<col*3; j+=3,k++)
{
img_y[i * col + k] = (0.299)*img_c[i*col*3 + j] + (0.587)*img_c[i*col*3 + (j+1)] + (0.114)*img_c[i*col*3 + (j+2)];
img_cb[i * col + k] = 128 - (0.168736)*img_c[i*col*3 + j] - (0.331264)*img_c[i*col*3 + (j+1)] + (0.5)*img_c[i*col*3 + (j+2)];
img_cr[i * col + k] = 128 + (0.5)*img_c[i*col*3 + j] - (0.418688)*img_c[i*col*3 + (j+1)] - (0.081312)*img_c[i*col*3 + (j+2)];
}
//Center
for(i=0; i<row; i++)
for(j=0; j<col; j++)
{
img_y[i * col + j] -= 128;
img_cb[i * col + j] -= 128;
img_cr[i * col + j] -= 128;
}
gettimeofday(&endrgb, NULL);
// printf("Before DCT\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%4d", img_cr[m*col+n]);
// printf("\n");
// }
//Copying YCbCr to Device
hipMemcpy( ky, img_y, img_size*2*sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( kcb, img_cb, img_size*2*sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( kcr, img_cr, img_size*2*sizeof(int), hipMemcpyHostToDevice );
dim3 dimGrid(ceil((double)col / 8),
ceil((double)row / 8),1);
dim3 dimBlock(8, 8, 1);
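  //Each thread whose origin (its thread coordinates scaled by 8) lies inside
  //the image handles one complete 8x8 block; the bounds check inside the
  //kernels masks any threads whose origin falls outside the padded image.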
//Discrete Cosine Transform
gettimeofday(&startdct, NULL);
hipLaunchKernelGGL(( parallel_dct) , dim3(dimGrid), dim3(dimBlock) , 0, 0, ky, kcb, kcr, kdy, kdcb, kdcr, row, col);
hipDeviceSynchronize();
gettimeofday(&enddct, NULL);
//Copying results back from device
hipMemcpy( img_dy, kdy, img_size*2*sizeof(double), hipMemcpyDeviceToHost );
hipMemcpy( img_dcb, kdcb, img_size*2*sizeof(double), hipMemcpyDeviceToHost );
hipMemcpy( img_dcr, kdcr, img_size*2*sizeof(double), hipMemcpyDeviceToHost );
// hipFree(ky);
// hipFree(kcb);
// hipFree(kcr);
// hipFree(kdy);
// hipFree(kdcb);
// hipFree(kdcr);
// printf("After DCT\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6.2f", img_dcr[m*col+n]);
// printf("\n");
// }
//Quantization
gettimeofday(&startquant, NULL);
for(m=0; m<row; m+=8)
for(n=0; n<col; n+=8)
for(i=0; i<8; i++)
for(j=0; j<8; j++)
{
img_qy[(m+i) * col + (n+j)] = (char)rint((img_dy[(m+i)*col + (n+j)]/Q[i][j]));
img_qcb[(m+i) * col + (n+j)] = (char)rint((img_dcb[(m+i)*col + (n+j)]/Q[i][j]));
img_qcr[(m+i) * col + (n+j)] = (char)rint((img_dcr[(m+i)*col + (n+j)]/Q[i][j]));
}
gettimeofday(&endquant, NULL);
// printf("After Quant\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6d", img_qcr[m*row+n]);
// printf("\n");
// }
//Linearization of each 8x8 block before compression
for(m=0; m<row; m+=8)
for(n=0; n<col; n+=8)
{
//Linearization
Traverse(img_qy+(m*col+n), trav_arr_qy, col);
Traverse(img_qcb+(m*col+n), trav_arr_qcb, col);
Traverse(img_qcr+(m*col+n), trav_arr_qcr, col);
//Combination into single Huffman array
for(c=0; c<64; c++, counter++)
{
huff[counter] = trav_arr_qy[c];
huff[col*row+counter] = trav_arr_qcb[c];
huff[col*row*2+counter] = trav_arr_qcr[c];
}
}
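  //At this point huff holds the zigzag-scanned blocks in planar order: the
  //first row*col bytes are Y, the next row*col are Cb, the last row*col are Cr.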
// printf("After Traverse");
// for(m=row*col*2;m<row*col*2+64;m++){
// if(m%8 == 0)
// printf("\n");
// printf("%6d", huff[m]);
// }
// printf("\n");
//Write out combined matrix to "output.ppm"
fprintf(f_out, "%s\n", img_type);
fprintf(f_out, "%d %d\n", orig_col, orig_row);
fprintf(f_out, "%d\n", char_val);
for(m=0; m<row*col*3; m++)
fprintf(f_out, "%c", huff[m]);
fclose(f_in);
fclose(f_out);
//Huffman Compression
gettimeofday(&starthuff, NULL);
//system("./huff -c output.ppm output.ppm.huf");
char arg1[] = "c";
char arg2[] = "output.ppm";
char arg3[] = "output.ppm.huf";
lab4(arg1, arg2, arg3);
gettimeofday(&endhuff, NULL);
gettimeofday(&endcmptot, NULL);
  //Timing outputs in seconds
// printf("file: %s\n",argv[1]);
printf("\n");
printf("%6.3f,", (double)(endrgb.tv_usec - startrgb.tv_usec) / 1000000 + (endrgb.tv_sec - startrgb.tv_sec));
printf("%6.3f,", (double)(enddct.tv_usec - startdct.tv_usec) / 1000000 + (enddct.tv_sec - startdct.tv_sec));
printf("%6.3f,", (double)(endquant.tv_usec - startquant.tv_usec) / 1000000 + (endquant.tv_sec - startquant.tv_sec));
printf("%6.3f,", (double)(endhuff.tv_usec - starthuff.tv_usec) / 1000000 + (endhuff.tv_sec - starthuff.tv_sec));
printf("%6.3f\n", (double)(endcmptot.tv_usec - startcmptot.tv_usec) / 1000000 + (endcmptot.tv_sec - startcmptot.tv_sec));
gettimeofday(&startdectot, NULL);
//printf("Decompressing...\n");
//Huffman Decompression
gettimeofday(&startihuff, NULL);
char arg4[] = "d";
char arg5[] = "output.ppm.uhuf";
lab4(arg4, arg3, arg5);
gettimeofday(&endihuff, NULL);
//End of JPEG encoding
//////////////////////////////////////////////////////////////////////////////
// BEGIN DECODING OF UNCOMPRESSED JPEG IMAGE
//////////////////////////////////////////////////////////////////////////////
//Open uncompressed huffman file
FILE *g_in = fopen("output.ppm.uhuf", "r");
FILE *g_out = fopen("result.ppm", "w");
//Parse header
fscanf(g_in, "%s\n", img_type);
fscanf(g_in, "%d %d\n", &orig_col, &orig_row);
fscanf(g_in, "%d\n", &char_val);
col = orig_col;
row = orig_row;
if(orig_col % 8 != 0)
col = orig_col + (8-(orig_col % 8));
if(orig_row % 8 != 0)
row = orig_row + (8-(orig_row % 8));
//Prepare huffman array
memset(huff, 0, row*col*3*sizeof(char));
//printf("Reading in file\n");
//Read in file contents
for(m=0; m<row*col*3; m++)
fscanf(g_in, "%c", &huff[m]);
// printf("Before ITraverse");
// for(m=row*col*2;m<row*col*2+64;m++){
// if(m%8 == 0)
// printf("\n");
// printf("%6d", huff[m]);
// }
// printf("\n");
//printf("Reverse Linear\n");
//Reverse the linearization
counter = 0;
for(i=0; i<3; i++)
for(m=0; m<row; m+=8)
for(n=0; n<col; n+=8, counter++)
if(i==0)
Inverse(&img_qy[m * col + n], (huff + counter*64), col);
else if(i==1)
Inverse(&img_qcb[m * col + n], (huff + counter*64), col);
else
Inverse(&img_qcr[m * col + n], (huff + counter*64), col);
// printf("Before IQuant\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6d", img_qcr[m*row+n]);
// printf("\n");
// }
//printf("IQuant\n");
gettimeofday(&startiquant, NULL);
//Inverse Quantization
for(m=0; m<row; m+=8)
for(n=0; n<col; n+=8)
for (i=0; i<8; i++)
for (j=0; j<8; j++)
{
img_dy[(m+i)*col + (n+j)] = (img_qy[(m+i)*col + (n+j)]*Q[i][j]);
img_dcb[(m+i)*col + (n+j)] = (img_qcb[(m+i)*col + (n+j)]*Q[i][j]);
img_dcr[(m+i)*col + (n+j)] = (img_qcr[(m+i)*col + (n+j)]*Q[i][j]);
}
gettimeofday(&endiquant, NULL);
// printf("Before IDCT\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6.2f", img_dcr[m*col+n]);
// printf("\n");
// }
  //Initializing Device matrices
// hipMalloc((void **)&kdy, img_size*2*sizeof(double));
// hipMalloc((void **)&kdcb, img_size*2*sizeof(double));
// hipMalloc((void **)&kdcr, img_size*2*sizeof(double));
// hipMalloc((void **)&ky, img_size*2*sizeof(int));
// hipMalloc((void **)&kcb, img_size*2*sizeof(int));
// hipMalloc((void **)&kcr, img_size*2*sizeof(int));
//Copying DCT matrices to device memory
hipMemcpy( kdy, img_dy, img_size*2*sizeof(double), hipMemcpyHostToDevice );
hipMemcpy( kdcb, img_dcb, img_size*2*sizeof(double), hipMemcpyHostToDevice );
hipMemcpy( kdcr, img_dcr, img_size*2*sizeof(double), hipMemcpyHostToDevice );
gettimeofday(&startidct, NULL);
  //  IDCT
  hipLaunchKernelGGL(( parallel_idct) , dim3(dimGrid), dim3(dimBlock) , 0, 0, ky, kcb, kcr, kdy, kdcb, kdcr, row, col);
hipDeviceSynchronize();
gettimeofday(&endidct, NULL);
hipMemcpy( img_y, ky, img_size*2*sizeof(int), hipMemcpyDeviceToHost );
hipMemcpy( img_cb, kcb, img_size*2*sizeof(int), hipMemcpyDeviceToHost );
hipMemcpy( img_cr, kcr, img_size*2*sizeof(int), hipMemcpyDeviceToHost );
// printf("After IDCT\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6d", img_cr[m*col+n]);
// printf("\n");
// }
gettimeofday(&startirgb, NULL);
//Un-Center
for(i=0; i<row; i++)
for(j=0; j<col; j++)
{
img_y[i * col + j] += 128;
img_cb[i * col + j] += 128;
img_cr[i * col + j] += 128;
}
// printf("YCbCr[0][0]\n");
// printf("%8d %8d %8d\n",img_y[0][0], img_cb[0][0], img_cr[0][0]);
int tempr, tempg, tempb;
// printf("YCbCr->RGB\n");
//YCbCr back to RGB
for(m=0; m<row; m++)
for(n=0, j=0; n<col; n++, j+=3)
{
tempr = (img_y[m * col + n] + 1.40200 * (img_cr[m * col + n] - 128));
tempg = (img_y[m * col + n] - 0.34414 * (img_cb[m * col + n] - 128) - 0.71414 * (img_cr[m * col + n] - 128));
tempb = (img_y[m * col + n] + 1.77200 * (img_cb[m * col + n] - 128));
if(tempr > 255)
tempr = 255;
if(tempr < 0)
tempr = 0;
if(tempg > 255)
tempg = 255;
if(tempg < 0)
tempg = 0;
if(tempb > 255)
tempb = 255;
if(tempb < 0)
tempb = 0;
img_c[m*col*3 + j] = (unsigned char)tempr;
img_c[m*col*3 + (j+1)] = (unsigned char)tempg;
img_c[m*col*3 + (j+2)] = (unsigned char)tempb;
}
gettimeofday(&endirgb, NULL);
// printf("RGB[0][0]\n");
// printf("%8d %8d %8d\n",img_c[0][0], img_c[0][1], img_c[0][2]);
// printf("Write out\n");
//Write out the final, reconstructed RGB image
fprintf(g_out, "%s\n", img_type);
fprintf(g_out, "%d %d\n", orig_col, orig_row);
fprintf(g_out, "%d\n", char_val);
for(i=0; i<orig_row; i++)
for(j=0; j<orig_col*3; j++)
fprintf(g_out, "%c", img_c[i*col*3 + j]);
// for(m=0;m<64;m++){
// if(m%8==0)
// printf("\n");
// printf("%6d",img_c[m]);
// }
//Clean up
fclose(g_in);
fclose(g_out);
gettimeofday(&enddectot, NULL);
printf("\n");
printf("%6.3f,", (double)(endirgb.tv_usec - startirgb.tv_usec) / 1000000 + (endirgb.tv_sec - startirgb.tv_sec));
printf("%6.3f,", (double)(endidct.tv_usec - startidct.tv_usec) / 1000000 + (endidct.tv_sec - startidct.tv_sec));
printf("%6.3f,", (double)(endiquant.tv_usec - startiquant.tv_usec) / 1000000 + (endiquant.tv_sec - startiquant.tv_sec));
printf("%6.3f,", (double)(endihuff.tv_usec - startihuff.tv_usec) / 1000000 + (endihuff.tv_sec - startihuff.tv_sec));
printf("%6.3f\n", (double)(enddectot.tv_usec - startdectot.tv_usec) / 1000000 + (enddectot.tv_sec - startdectot.tv_sec));
// printf("freeing memory\n");
//Free allocated memory
hipFree(ky);
hipFree(kcb);
hipFree(kcr);
hipFree(kdy);
hipFree(kdcb);
hipFree(kdcr);
free(img_c);
free(img_y);
free(img_cb);
free(img_cr);
free(img_dy);
free(img_dcb);
free(img_dcr);
// free(img_qy);
// free(img_qcb);
// free(img_qcr);
free(trav_arr_qy);
free(trav_arr_qcb);
free(trav_arr_qcr);
free(huff);
// system("rm output.ppm.huf output.ppm.uhuf");
gettimeofday(&endtot, NULL);
// printf("%s,%d,%d,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f\n",
// argv[1],orig_col,orig_row,
// (double)(endrgb.tv_usec - startrgb.tv_usec) / 1000000 + (endrgb.tv_sec - startrgb.tv_sec),
// (double)(enddct.tv_usec - startdct.tv_usec) / 1000000 + (enddct.tv_sec - startdct.tv_sec),
// (double)(endquant.tv_usec - startquant.tv_usec) / 1000000 + (endquant.tv_sec - startquant.tv_sec),
// (double)(endhuff.tv_usec - starthuff.tv_usec) / 1000000 + (endhuff.tv_sec - starthuff.tv_sec),
// (double)(endcmptot.tv_usec - startcmptot.tv_usec) / 1000000 + (endcmptot.tv_sec - startcmptot.tv_sec),
// (double)(endirgb.tv_usec - startirgb.tv_usec) / 1000000 + (endirgb.tv_sec - startirgb.tv_sec),
// (double)(endidct.tv_usec - startidct.tv_usec) / 1000000 + (endidct.tv_sec - startidct.tv_sec),
// (double)(endiquant.tv_usec - startiquant.tv_usec) / 1000000 + (endiquant.tv_sec - startiquant.tv_sec),
// (double)(endihuff.tv_usec - startihuff.tv_usec) / 1000000 + (endihuff.tv_sec - startihuff.tv_sec),
// (double)(enddectot.tv_usec - startdectot.tv_usec) / 1000000 + (enddectot.tv_sec - startdectot.tv_sec),
// (double)(endtot.tv_usec - starttot.tv_usec) / 1000000 + (endtot.tv_sec - starttot.tv_sec));
//Scott is n00b
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// FUNCTION DEFINITIONS of Traverse() and Inverse()
////////////////////////////////////////////////////////////////////////////////
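// Traverse() walks an 8x8 block in the JPEG zigzag order, emitting the 64
// coefficients with the low frequencies first so the zeros produced by
// quantization cluster at the end for the entropy coder; Inverse() undoes it.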
void Traverse(char *block, char *arr, int col)
{
int count = 0;
int r = 0;
int c = 0;
while(count < 64)
{
if(c < 7)
{
arr[count++] = block[(r*col) + (c++)];
if(count == 64)
break;
}
else
arr[count++] = block[(r++)*col + c];
while((r<7) && (c>0))
arr[count++] = block[(r++)*col + (c--)];
if(r < 7)
arr[count++] = block[(r++)*col + c];
else
arr[count++] = block[(r*col) + (c++)];
while((r>0) && (c<7))
arr[count++] = block[(r--)*col + (c++)];
}
}
void Inverse(char *block, char *arr, int col)
{
int count = 0;
int r = 0;
int c = 0;
while(count < 64)
{
if(c < 7)
{
block[(r*col) + (c++)] = arr[count++];
if(count == 64)
break;
}
else
block[(r++)*col + c] = arr[count++];
while((r<7) && (c>0))
block[(r++)*col + (c--)] = arr[count++];
if(r < 7)
block[(r++)*col + c] = arr[count++];
else
block[(r*col) + (c++)] = arr[count++];
while((r>0) && (c<7))
block[(r--)*col + (c++)] = arr[count++];
}
}
| 7c9d3ad0a65c7e2adc4bda5a610467f14775659d.cu | #include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
extern "C" {
#include "lab4.h"
}
#define COEFFS(Cu,Cv,u,v) {\
if (u==0) Cu = 1/sqrt(2); else Cu = 1.0;\
if (v==0) Cv = 1/sqrt(2); else Cv = 1.0;}
//Function prototypes
void Traverse(char *block, char *arr, int col);
void Inverse(char *block, char *arr, int col);
//DCT Kernel
__global__ void parallel_dct(int *ky, int *kcb, int *kcr, double *kdy,
double *kdcb, double *kdcr, int row, int col)
{
int idx = (threadIdx.x + blockIdx.x * blockDim.x)*8;
int idy = (threadIdx.y + blockIdx.y * blockDim.y)*8;
int i,j,x;
if (idx < row && idy < col)
{
double temp_dcty[64];
double temp_dctcb[64];
double temp_dctcr[64];
double DCT[8][8] = {0.3536, 0.3536, 0.3536, 0.3536, 0.3536, 0.3536, 0.3536, 0.3536,
0.4904, 0.4157, 0.2778, 0.0975, -0.0975, -0.2778, -0.4157, -0.4904,
0.4619, 0.1913, -0.1913, -0.4619, -0.4619, -0.1913, 0.1913, 0.4619,
0.4157, -0.0975, -0.4904, -0.2778, 0.2778, 0.4904, 0.0975, -0.4157,
0.3536, -0.3536, -0.3536, 0.3536, 0.3536, -0.3536, -0.3536, 0.3536,
0.2778, -0.4904, 0.0975, 0.4157, -0.4157, -0.0975, 0.4904, -0.2778,
0.1913, -0.4619, 0.4619, -0.1913, -0.1913, 0.4619, -0.4619, 0.1913,
                            0.0975, -0.2778, 0.4157, -0.4904, 0.4904, -0.4157, 0.2778, -0.0975};
for(i = 0; i < 8; i++)
for(j = 0; j < 8; j++)
{
temp_dcty[i*8+j] = 0;
temp_dctcb[i*8+j] = 0;
temp_dctcr[i*8+j] = 0;
for(x = 0; x < 8; x++)
{
temp_dcty[i*8+j] += DCT[i][x] * (double)ky[(idx+x) * col + (idy+j)];
temp_dctcb[i*8+j] += DCT[i][x] * (double)kcb[(idx+x) * col + (idy+j)];
temp_dctcr[i*8+j] += DCT[i][x] * (double)kcr[(idx+x) * col + (idy+j)];
}
}
for(i = 0; i < 8; i++)
for(j = 0; j < 8; j++)
{
kdy[(idx+i) * col + (idy+j)] = 0;
kdcb[(idx+i) * col + (idy+j)] = 0;
kdcr[(idx+i) * col + (idy+j)] = 0;
for(x = 0; x < 8; x++)
{
kdy[(idx+i) * col + (idy+j)] += temp_dcty[i*8+x] * DCT[j][x];
kdcb[(idx+i) * col + (idy+j)] += temp_dctcb[i*8+x] * DCT[j][x];
kdcr[(idx+i) * col + (idy+j)] += temp_dctcr[i*8+x] * DCT[j][x];
}
}
}
}
//IDCT Kernel
__global__ void parallel_idct(int *ky, int *kcb, int *kcr, double *kdy,
double *kdcb, double *kdcr, int row, int col)
{
int idx = (threadIdx.x + blockIdx.x * blockDim.x)*8;
int idy = (threadIdx.y + blockIdx.y * blockDim.y)*8;
if (idx < row && idy < col)
{
double temp_dcty[64];
double temp_dctcb[64];
double temp_dctcr[64];
int i,j,x;
double IDCT[8][8] = {0.3535, 0.4905, 0.4620, 0.4157, 0.3535, 0.2778, 0.1914, 0.0975,
0.3536, 0.4156, 0.1914, -0.0975, -0.3536, -0.4903, -0.4621, -0.2777,
0.3534, 0.2780, -0.1914, -0.4905, -0.3534, 0.0973, 0.4622, 0.4156,
0.3537, 0.0973, -0.4619, -0.2778, 0.3533, 0.4160, -0.1916, -0.4903,
0.3533, -0.0973, -0.4621, 0.2778, 0.3537, -0.4160, -0.1911, 0.4903,
0.3537, -0.2780, -0.1913, 0.4905, -0.3537, -0.0973, 0.4618, -0.4156,
0.3534, -0.4156, 0.1913, 0.0975, -0.3534, 0.4903, -0.4619, 0.2777,
0.3535, -0.4905, 0.4620, -0.4157, 0.3535, -0.2778, 0.1913, -0.0975};
for(i = 0; i < 8; i++)
for(j = 0; j < 8; j++)
{
temp_dcty[i*8+j] = 0;
temp_dctcb[i*8+j] = 0;
temp_dctcr[i*8+j] = 0;
for(x = 0; x < 8; x++)
{
temp_dcty[i*8+j] += IDCT[i][x] * kdy[(idx+x)*col + (idy+j)];
temp_dctcb[i*8+j] += IDCT[i][x] * kdcb[(idx+x)*col + (idy+j)];
temp_dctcr[i*8+j] += IDCT[i][x] * kdcr[(idx+x)*col + (idy+j)];
}
}
for(i = 0; i < 8; i++)
for(j = 0; j < 8; j++)
{
ky[(idx+i) * col + (idy+j)] = 0;
kcb[(idx+i) * col + (idy+j)] = 0;
kcr[(idx+i) * col + (idy+j)] = 0;
for(x = 0; x < 8; x++)
{
ky[(idx+i) * col + (idy+j)] += (int)(temp_dcty[i*8+x] * IDCT[j][x]);
kcb[(idx+i) * col + (idy+j)] += (int)(temp_dctcb[i*8+x] * IDCT[j][x]);
kcr[(idx+i) * col + (idy+j)] += (int)(temp_dctcr[i*8+x] * IDCT[j][x]);
}
}
}
}
int main (int argc, char **argv)
{
//Timing variables
struct timeval startrgb, endrgb, startdct, enddct, startquant, endquant, starthuff, endhuff, startcmptot, endcmptot;
struct timeval startirgb, endirgb, startidct, endidct, startiquant, endiquant, startihuff, endihuff, startdectot, enddectot;
struct timeval starttot, endtot;
gettimeofday(&starttot, NULL);
gettimeofday(&startcmptot, NULL);
//Declaration of variables
FILE *f_in = fopen(argv[1], "r");
FILE *f_out = fopen("output.ppm", "w");
char img_type[16];
int row, col, char_val, orig_row, orig_col;
int c, i, j, k, m, n;
int counter = 0;
//double temp_y, temp_cb, temp_cr;
//double Ci, Cj;
//Quantization Matrix
unsigned char Q[8][8] = { 16, 11, 10, 16, 24, 40, 51, 61,
12, 12, 14, 19, 26, 58, 60, 55,
14, 13, 16, 24, 40, 57, 69, 56,
14, 17, 22, 29, 51, 87, 80, 62,
18, 22, 37, 56, 68, 109, 103, 77,
24, 35, 55, 64, 81, 104, 113, 92,
49, 64, 78, 87, 103, 121, 120, 101,
72, 92, 95, 98, 112, 100, 103, 99};
//Parse header
fscanf(f_in, "%s\n", img_type);
fscanf(f_in, "%d %d\n", &orig_col, &orig_row);
fscanf(f_in, "%d\n", &char_val);
//Pad row and col if necessary
col = orig_col;
row = orig_row;
if(orig_col % 8 != 0)
col = orig_col + (8-(orig_col % 8));
if(orig_row % 8 != 0)
row = orig_row + (8-(orig_row % 8));
int img_size = col*row;
//Full RGB matrix
unsigned char *img_c = (unsigned char *)calloc(row*col*3, sizeof(unsigned char));
//Separate YCbCr matrices
int *img_y = (int *)calloc(img_size*2, sizeof(int));
int *img_cb = (int *)calloc(img_size*2, sizeof(int));
int *img_cr = (int *)calloc(img_size*2, sizeof(int));
//Kernel YCbCr matrices
int *ky, *kcb, *kcr;
cudaMalloc((void **)&ky, img_size*2*sizeof(int));
cudaMalloc((void **)&kcb, img_size*2*sizeof(int));
cudaMalloc((void **)&kcr, img_size*2*sizeof(int));
//Discrete cosine transform matrices
double *img_dy = (double *)calloc(img_size*2, sizeof(double));
double *img_dcb = (double *)calloc(img_size*2, sizeof(double));
double *img_dcr = (double *)calloc(img_size*2, sizeof(double));
//Kernel DCT matrices
double *kdy, *kdcb, *kdcr;
cudaMalloc((void **)&kdy, img_size*2*sizeof(double));
cudaMalloc((void **)&kdcb, img_size*2*sizeof(double));
cudaMalloc((void **)&kdcr, img_size*2*sizeof(double));
//Quantization matrices
char *img_qy = (char *)calloc(img_size*2, sizeof(char));
char *img_qcb = (char *)calloc(img_size*2, sizeof(char));
char *img_qcr = (char *)calloc(img_size*2, sizeof(char));
//Temp arrays for Traverse()
char *trav_arr_qy = (char *)calloc(64, sizeof(char));
char *trav_arr_qcb = (char *)calloc(64, sizeof(char));
char *trav_arr_qcr = (char *)calloc(64, sizeof(char));
//Temp arrays for DCT matrix mult
// double *temp_dcty = (double *)calloc(64, sizeof(double));
// double *temp_dctcb = (double *)calloc(64, sizeof(double));
// double *temp_dctcr = (double *)calloc(64, sizeof(double));
//Rearranged matrix for huffman (1D)
char *huff = (char *)calloc(row*col*3, sizeof(char));
//Read in pixel data
for(i=0; i<orig_row; i++)
for(j=0; j<orig_col*3; j++)
fscanf(f_in, "%c", &img_c[i*col*3 + j]);
// for(m=0;m<64;m++){
// if(m%8==0)
// printf("\n");
// printf("%6d",img_c[m]);
// }
  //RGB -> YCbCr
gettimeofday(&startrgb, NULL);
for(i=0; i<row; i++)
for(j=0,k=0; j<col*3; j+=3,k++)
{
img_y[i * col + k] = (0.299)*img_c[i*col*3 + j] + (0.587)*img_c[i*col*3 + (j+1)] + (0.114)*img_c[i*col*3 + (j+2)];
img_cb[i * col + k] = 128 - (0.168736)*img_c[i*col*3 + j] - (0.331264)*img_c[i*col*3 + (j+1)] + (0.5)*img_c[i*col*3 + (j+2)];
img_cr[i * col + k] = 128 + (0.5)*img_c[i*col*3 + j] - (0.418688)*img_c[i*col*3 + (j+1)] - (0.081312)*img_c[i*col*3 + (j+2)];
}
//Center
for(i=0; i<row; i++)
for(j=0; j<col; j++)
{
img_y[i * col + j] -= 128;
img_cb[i * col + j] -= 128;
img_cr[i * col + j] -= 128;
}
gettimeofday(&endrgb, NULL);
// printf("Before DCT\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%4d", img_cr[m*col+n]);
// printf("\n");
// }
//Copying YCbCr to Device
cudaMemcpy( ky, img_y, img_size*2*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( kcb, img_cb, img_size*2*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( kcr, img_cr, img_size*2*sizeof(int), cudaMemcpyHostToDevice );
dim3 dimGrid(ceil((double)col / 8),
ceil((double)row / 8),1);
dim3 dimBlock(8, 8, 1);
//Discrete Cosine Transform
gettimeofday(&startdct, NULL);
parallel_dct <<< dimGrid, dimBlock >>>(ky, kcb, kcr, kdy, kdcb, kdcr, row, col);
cudaDeviceSynchronize();
gettimeofday(&enddct, NULL);
//Copying results back from device
cudaMemcpy( img_dy, kdy, img_size*2*sizeof(double), cudaMemcpyDeviceToHost );
cudaMemcpy( img_dcb, kdcb, img_size*2*sizeof(double), cudaMemcpyDeviceToHost );
cudaMemcpy( img_dcr, kdcr, img_size*2*sizeof(double), cudaMemcpyDeviceToHost );
// cudaFree(ky);
// cudaFree(kcb);
// cudaFree(kcr);
// cudaFree(kdy);
// cudaFree(kdcb);
// cudaFree(kdcr);
// printf("After DCT\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6.2f", img_dcr[m*col+n]);
// printf("\n");
// }
//Quantization
gettimeofday(&startquant, NULL);
for(m=0; m<row; m+=8)
for(n=0; n<col; n+=8)
for(i=0; i<8; i++)
for(j=0; j<8; j++)
{
img_qy[(m+i) * col + (n+j)] = (char)rint((img_dy[(m+i)*col + (n+j)]/Q[i][j]));
img_qcb[(m+i) * col + (n+j)] = (char)rint((img_dcb[(m+i)*col + (n+j)]/Q[i][j]));
img_qcr[(m+i) * col + (n+j)] = (char)rint((img_dcr[(m+i)*col + (n+j)]/Q[i][j]));
}
gettimeofday(&endquant, NULL);
// printf("After Quant\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6d", img_qcr[m*row+n]);
// printf("\n");
// }
//Linearization of each 8x8 block before compression
for(m=0; m<row; m+=8)
for(n=0; n<col; n+=8)
{
//Linearization
Traverse(img_qy+(m*col+n), trav_arr_qy, col);
Traverse(img_qcb+(m*col+n), trav_arr_qcb, col);
Traverse(img_qcr+(m*col+n), trav_arr_qcr, col);
//Combination into single Huffman array
for(c=0; c<64; c++, counter++)
{
huff[counter] = trav_arr_qy[c];
huff[col*row+counter] = trav_arr_qcb[c];
huff[col*row*2+counter] = trav_arr_qcr[c];
}
}
// printf("After Traverse");
// for(m=row*col*2;m<row*col*2+64;m++){
// if(m%8 == 0)
// printf("\n");
// printf("%6d", huff[m]);
// }
// printf("\n");
//Write out combined matrix to "output.ppm"
fprintf(f_out, "%s\n", img_type);
fprintf(f_out, "%d %d\n", orig_col, orig_row);
fprintf(f_out, "%d\n", char_val);
for(m=0; m<row*col*3; m++)
fprintf(f_out, "%c", huff[m]);
fclose(f_in);
fclose(f_out);
//Huffman Compression
gettimeofday(&starthuff, NULL);
//system("./huff -c output.ppm output.ppm.huf");
char arg1[] = "c";
char arg2[] = "output.ppm";
char arg3[] = "output.ppm.huf";
lab4(arg1, arg2, arg3);
gettimeofday(&endhuff, NULL);
gettimeofday(&endcmptot, NULL);
  //Timing outputs in seconds
// printf("file: %s\n",argv[1]);
printf("\n");
printf("%6.3f,", (double)(endrgb.tv_usec - startrgb.tv_usec) / 1000000 + (endrgb.tv_sec - startrgb.tv_sec));
printf("%6.3f,", (double)(enddct.tv_usec - startdct.tv_usec) / 1000000 + (enddct.tv_sec - startdct.tv_sec));
printf("%6.3f,", (double)(endquant.tv_usec - startquant.tv_usec) / 1000000 + (endquant.tv_sec - startquant.tv_sec));
printf("%6.3f,", (double)(endhuff.tv_usec - starthuff.tv_usec) / 1000000 + (endhuff.tv_sec - starthuff.tv_sec));
printf("%6.3f\n", (double)(endcmptot.tv_usec - startcmptot.tv_usec) / 1000000 + (endcmptot.tv_sec - startcmptot.tv_sec));
gettimeofday(&startdectot, NULL);
//printf("Decompressing...\n");
//Huffman Decompression
gettimeofday(&startihuff, NULL);
char arg4[] = "d";
char arg5[] = "output.ppm.uhuf";
lab4(arg4, arg3, arg5);
gettimeofday(&endihuff, NULL);
//End of JPEG encoding
//////////////////////////////////////////////////////////////////////////////
// BEGIN DECODING OF UNCOMPRESSED JPEG IMAGE
//////////////////////////////////////////////////////////////////////////////
//Open uncompressed huffman file
FILE *g_in = fopen("output.ppm.uhuf", "r");
FILE *g_out = fopen("result.ppm", "w");
//Parse header
fscanf(g_in, "%s\n", img_type);
fscanf(g_in, "%d %d\n", &orig_col, &orig_row);
fscanf(g_in, "%d\n", &char_val);
col = orig_col;
row = orig_row;
if(orig_col % 8 != 0)
col = orig_col + (8-(orig_col % 8));
if(orig_row % 8 != 0)
row = orig_row + (8-(orig_row % 8));
//Prepare huffman array
memset(huff, 0, row*col*3*sizeof(char));
//printf("Reading in file\n");
//Read in file contents
for(m=0; m<row*col*3; m++)
fscanf(g_in, "%c", &huff[m]);
// printf("Before ITraverse");
// for(m=row*col*2;m<row*col*2+64;m++){
// if(m%8 == 0)
// printf("\n");
// printf("%6d", huff[m]);
// }
// printf("\n");
//printf("Reverse Linear\n");
//Reverse the linearization
counter = 0;
for(i=0; i<3; i++)
for(m=0; m<row; m+=8)
for(n=0; n<col; n+=8, counter++)
if(i==0)
Inverse(&img_qy[m * col + n], (huff + counter*64), col);
else if(i==1)
Inverse(&img_qcb[m * col + n], (huff + counter*64), col);
else
Inverse(&img_qcr[m * col + n], (huff + counter*64), col);
// printf("Before IQuant\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6d", img_qcr[m*row+n]);
// printf("\n");
// }
//printf("IQuant\n");
gettimeofday(&startiquant, NULL);
//Inverse Quantization
for(m=0; m<row; m+=8)
for(n=0; n<col; n+=8)
for (i=0; i<8; i++)
for (j=0; j<8; j++)
{
img_dy[(m+i)*col + (n+j)] = (img_qy[(m+i)*col + (n+j)]*Q[i][j]);
img_dcb[(m+i)*col + (n+j)] = (img_qcb[(m+i)*col + (n+j)]*Q[i][j]);
img_dcr[(m+i)*col + (n+j)] = (img_qcr[(m+i)*col + (n+j)]*Q[i][j]);
}
gettimeofday(&endiquant, NULL);
// printf("Before IDCT\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6.2f", img_dcr[m*col+n]);
// printf("\n");
// }
  //Initializing Device matrices
// cudaMalloc((void **)&kdy, img_size*2*sizeof(double));
// cudaMalloc((void **)&kdcb, img_size*2*sizeof(double));
// cudaMalloc((void **)&kdcr, img_size*2*sizeof(double));
// cudaMalloc((void **)&ky, img_size*2*sizeof(int));
// cudaMalloc((void **)&kcb, img_size*2*sizeof(int));
// cudaMalloc((void **)&kcr, img_size*2*sizeof(int));
//Copying DCT matrices to device memory
cudaMemcpy( kdy, img_dy, img_size*2*sizeof(double), cudaMemcpyHostToDevice );
cudaMemcpy( kdcb, img_dcb, img_size*2*sizeof(double), cudaMemcpyHostToDevice );
cudaMemcpy( kdcr, img_dcr, img_size*2*sizeof(double), cudaMemcpyHostToDevice );
gettimeofday(&startidct, NULL);
// IDCT
parallel_idct <<< dimGrid, dimBlock >>>(ky, kcb, kcr, kdy, kdcb, kdcr, row, col);
cudaDeviceSynchronize();
gettimeofday(&endidct, NULL);
cudaMemcpy( img_y, ky, img_size*2*sizeof(int), cudaMemcpyDeviceToHost );
cudaMemcpy( img_cb, kcb, img_size*2*sizeof(int), cudaMemcpyDeviceToHost );
cudaMemcpy( img_cr, kcr, img_size*2*sizeof(int), cudaMemcpyDeviceToHost );
// printf("After IDCT\n");
// for(m=0;m<8;m++){
// for(n=0;n<8;n++)
// printf("%6d", img_cr[m*col+n]);
// printf("\n");
// }
gettimeofday(&startirgb, NULL);
//Un-Center
for(i=0; i<row; i++)
for(j=0; j<col; j++)
{
img_y[i * col + j] += 128;
img_cb[i * col + j] += 128;
img_cr[i * col + j] += 128;
}
// printf("YCbCr[0][0]\n");
// printf("%8d %8d %8d\n",img_y[0][0], img_cb[0][0], img_cr[0][0]);
int tempr, tempg, tempb;
// printf("YCbCr->RGB\n");
//YCbCr back to RGB
for(m=0; m<row; m++)
for(n=0, j=0; n<col; n++, j+=3)
{
tempr = (img_y[m * col + n] + 1.40200 * (img_cr[m * col + n] - 128));
tempg = (img_y[m * col + n] - 0.34414 * (img_cb[m * col + n] - 128) - 0.71414 * (img_cr[m * col + n] - 128));
tempb = (img_y[m * col + n] + 1.77200 * (img_cb[m * col + n] - 128));
if(tempr > 255)
tempr = 255;
if(tempr < 0)
tempr = 0;
if(tempg > 255)
tempg = 255;
if(tempg < 0)
tempg = 0;
if(tempb > 255)
tempb = 255;
if(tempb < 0)
tempb = 0;
img_c[m*col*3 + j] = (unsigned char)tempr;
img_c[m*col*3 + (j+1)] = (unsigned char)tempg;
img_c[m*col*3 + (j+2)] = (unsigned char)tempb;
}
gettimeofday(&endirgb, NULL);
// printf("RGB[0][0]\n");
// printf("%8d %8d %8d\n",img_c[0][0], img_c[0][1], img_c[0][2]);
// printf("Write out\n");
//Write out the final, reconstructed RGB image
fprintf(g_out, "%s\n", img_type);
fprintf(g_out, "%d %d\n", orig_col, orig_row);
fprintf(g_out, "%d\n", char_val);
for(i=0; i<orig_row; i++)
for(j=0; j<orig_col*3; j++)
fprintf(g_out, "%c", img_c[i*col*3 + j]);
// for(m=0;m<64;m++){
// if(m%8==0)
// printf("\n");
// printf("%6d",img_c[m]);
// }
//Clean up
fclose(g_in);
fclose(g_out);
gettimeofday(&enddectot, NULL);
printf("\n");
printf("%6.3f,", (double)(endirgb.tv_usec - startirgb.tv_usec) / 1000000 + (endirgb.tv_sec - startirgb.tv_sec));
printf("%6.3f,", (double)(endidct.tv_usec - startidct.tv_usec) / 1000000 + (endidct.tv_sec - startidct.tv_sec));
printf("%6.3f,", (double)(endiquant.tv_usec - startiquant.tv_usec) / 1000000 + (endiquant.tv_sec - startiquant.tv_sec));
printf("%6.3f,", (double)(endihuff.tv_usec - startihuff.tv_usec) / 1000000 + (endihuff.tv_sec - startihuff.tv_sec));
printf("%6.3f\n", (double)(enddectot.tv_usec - startdectot.tv_usec) / 1000000 + (enddectot.tv_sec - startdectot.tv_sec));
// printf("freeing memory\n");
//Free allocated memory
cudaFree(ky);
cudaFree(kcb);
cudaFree(kcr);
cudaFree(kdy);
cudaFree(kdcb);
cudaFree(kdcr);
free(img_c);
free(img_y);
free(img_cb);
free(img_cr);
free(img_dy);
free(img_dcb);
free(img_dcr);
// free(img_qy);
// free(img_qcb);
// free(img_qcr);
free(trav_arr_qy);
free(trav_arr_qcb);
free(trav_arr_qcr);
free(huff);
// system("rm output.ppm.huf output.ppm.uhuf");
gettimeofday(&endtot, NULL);
// printf("%s,%d,%d,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f,%0.3f\n",
// argv[1],orig_col,orig_row,
// (double)(endrgb.tv_usec - startrgb.tv_usec) / 1000000 + (endrgb.tv_sec - startrgb.tv_sec),
// (double)(enddct.tv_usec - startdct.tv_usec) / 1000000 + (enddct.tv_sec - startdct.tv_sec),
// (double)(endquant.tv_usec - startquant.tv_usec) / 1000000 + (endquant.tv_sec - startquant.tv_sec),
// (double)(endhuff.tv_usec - starthuff.tv_usec) / 1000000 + (endhuff.tv_sec - starthuff.tv_sec),
// (double)(endcmptot.tv_usec - startcmptot.tv_usec) / 1000000 + (endcmptot.tv_sec - startcmptot.tv_sec),
// (double)(endirgb.tv_usec - startirgb.tv_usec) / 1000000 + (endirgb.tv_sec - startirgb.tv_sec),
// (double)(endidct.tv_usec - startidct.tv_usec) / 1000000 + (endidct.tv_sec - startidct.tv_sec),
// (double)(endiquant.tv_usec - startiquant.tv_usec) / 1000000 + (endiquant.tv_sec - startiquant.tv_sec),
// (double)(endihuff.tv_usec - startihuff.tv_usec) / 1000000 + (endihuff.tv_sec - startihuff.tv_sec),
// (double)(enddectot.tv_usec - startdectot.tv_usec) / 1000000 + (enddectot.tv_sec - startdectot.tv_sec),
// (double)(endtot.tv_usec - starttot.tv_usec) / 1000000 + (endtot.tv_sec - starttot.tv_sec));
//Scott is n00b
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// FUNCTION DEFINITIONS of Traverse() and Inverse()
////////////////////////////////////////////////////////////////////////////////
void Traverse(char *block, char *arr, int col)
{
int count = 0;
int r = 0;
int c = 0;
while(count < 64)
{
if(c < 7)
{
arr[count++] = block[(r*col) + (c++)];
if(count == 64)
break;
}
else
arr[count++] = block[(r++)*col + c];
while((r<7) && (c>0))
arr[count++] = block[(r++)*col + (c--)];
if(r < 7)
arr[count++] = block[(r++)*col + c];
else
arr[count++] = block[(r*col) + (c++)];
while((r>0) && (c<7))
arr[count++] = block[(r--)*col + (c++)];
}
}
void Inverse(char *block, char *arr, int col)
{
int count = 0;
int r = 0;
int c = 0;
while(count < 64)
{
if(c < 7)
{
block[(r*col) + (c++)] = arr[count++];
if(count == 64)
break;
}
else
block[(r++)*col + c] = arr[count++];
while((r<7) && (c>0))
block[(r++)*col + (c--)] = arr[count++];
if(r < 7)
block[(r++)*col + c] = arr[count++];
else
block[(r*col) + (c++)] = arr[count++];
while((r>0) && (c<7))
block[(r--)*col + (c++)] = arr[count++];
}
}
|
e1642b3068c42e6d6e2a2067026a83f30ddbc965.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Project
//
#include "CUDA_Conductivity_matching.h"
#include "CUDA_Conductivity_matching_functions.h"
//
//
//
Domains::CUDA_Conductivity_matching::CUDA_Conductivity_matching():
  positions_array_x_(NULL), positions_array_y_(NULL), positions_array_z_(NULL), do_we_have_conductivity_(NULL)
{
}
//
//
//
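// The size-based constructor below uploads the voxel-centre coordinates and
// the per-voxel conductivity flags to the GPU once, so each later call to
// find_vertices_voxel_index() only has to transfer the few cell vertices
// it is asked about.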
Domains::CUDA_Conductivity_matching::CUDA_Conductivity_matching( int Size_of_array,
float* Voxel_center_pos_x,
float* Voxel_center_pos_y,
float* Voxel_center_pos_z,
bool* Do_we_have_conductivity ):
size_of_array_( Size_of_array )
{
//
//
hipError_t err;
//
// Memory allocation on CUDA device
err = hipMalloc( (void**)&positions_array_x_,
Size_of_array * sizeof(float));
if( err != hipSuccess )
{
      printf( "CUDA memory allocation failed: %s", hipGetErrorString(err) );
abort();
}
//
err = hipMalloc( (void**)&positions_array_y_,
Size_of_array * sizeof(float));
if( err != hipSuccess )
{
      printf( "CUDA memory allocation failed: %s", hipGetErrorString(err) );
abort();
}
//
err = hipMalloc( (void**)&positions_array_z_,
Size_of_array * sizeof(float));
if( err != hipSuccess )
{
      printf( "CUDA memory allocation failed: %s", hipGetErrorString(err) );
abort();
}
//
err = hipMalloc( (void**)&do_we_have_conductivity_,
Size_of_array * sizeof(bool));
if( err != hipSuccess )
{
      printf( "CUDA memory allocation failed: %s", hipGetErrorString(err) );
abort();
}
//
// Copy the array on GPU
err = hipMemcpy( positions_array_x_, Voxel_center_pos_x, Size_of_array * sizeof(float),
hipMemcpyHostToDevice);
if( err != hipSuccess )
{
      printf( "CUDA copy failed: %s", hipGetErrorString(err) );
abort();
}
//
err = hipMemcpy( positions_array_y_, Voxel_center_pos_y, Size_of_array * sizeof(float),
hipMemcpyHostToDevice);
if( err != hipSuccess )
{
      printf( "CUDA copy failed: %s", hipGetErrorString(err) );
abort();
}
//
err = hipMemcpy( positions_array_z_, Voxel_center_pos_z, Size_of_array * sizeof(float),
hipMemcpyHostToDevice);
if( err != hipSuccess )
{
      printf( "CUDA copy failed: %s", hipGetErrorString(err) );
abort();
}
//
err = hipMemcpy( do_we_have_conductivity_, Do_we_have_conductivity, Size_of_array * sizeof(bool),
hipMemcpyHostToDevice);
if( err != hipSuccess )
{
      printf( "CUDA copy failed: %s", hipGetErrorString(err) );
abort();
}
}
// //
// //
// //
// Domains::CUDA_Conductivity_matching( const Domains& that )
// {
// }
// //
// //
// //
// Domains::CUDA_Conductivity_matching( Domains&& that )
// {
// }
//
//
//
Domains::CUDA_Conductivity_matching::~CUDA_Conductivity_matching()
{
hipFree( positions_array_x_ );
hipFree( positions_array_y_ );
  hipFree( positions_array_z_ );
  hipFree( do_we_have_conductivity_ ); // allocated in the size-based constructor
}
// //
// //
// //
// Domains::CUDA_Conductivity_matching&
// Domains::CUDA_Conductivity_matching::operator = ( const Domains& that )
// {
// if ( this != &that )
// {
// // // free existing ressources
// // if( tab_ )
// // {
// // delete [] tab_;
// // tab_ = nullptr;
// // }
// // // allocating new ressources
// // pos_x_ = that.get_pos_x();
// // pos_y_ = that.get_pos_y();
// // list_position_ = that.get_list_position();
// // //
// // tab_ = new int[4];
// // std::copy( &that.get_tab(), &that.get_tab() + 4, tab_ );
// }
// //
// return *this;
// }
// //
// //
// //
// Domains::CUDA_Conductivity_matching&
// Domains::CUDA_Conductivity_matching::operator = ( Domains&& that )
// {
// if( this != &that )
// {
// // // initialisation
// // pos_x_ = 0;
// // pos_y_ = 0;
// // delete [] tab_;
// // tab_ = nullptr;
// // // pilfer the source
// // list_position_ = std::move( that.list_position_ );
// // pos_x_ = that.get_pos_x();
// // pos_y_ = that.get_pos_y();
// // tab_ = &that.get_tab();
// // // reset that
// // that.set_pos_x( 0 );
// // that.set_pos_y( 0 );
// // that.set_tab( nullptr );
// }
// //
// return *this;
// }
//
//
//
void
Domains::CUDA_Conductivity_matching::find_vertices_voxel_index( float* Vertices_position,
float* Point_min_distance,
int* Point_min_distance_index)
{
//
//
hipError_t err;
float *cell_points;
float *point_min_distance;
int *point_min_distance_index;
//
// Memory allocation on CUDA device
err = hipMalloc( (void**)&cell_points,
5 * 3 * sizeof(float));
if( err != hipSuccess )
{
      printf( "CUDA memory allocation failed: %s", hipGetErrorString(err) );
abort();
}
//
err = hipMalloc( (void**)&point_min_distance,
(BLOCKS+REMAIN) * 5 * sizeof(float));
if( err != hipSuccess )
{
      printf( "CUDA memory allocation failed: %s", hipGetErrorString(err) );
abort();
}
//
err = hipMalloc( (void**)&point_min_distance_index,
(BLOCKS+REMAIN) * 5 * sizeof(int));
if( err != hipSuccess )
{
      printf( "CUDA memory allocation failed: %s", hipGetErrorString(err) );
abort();
}
//
// Copy the array on GPU
err = hipMemcpy( cell_points, Vertices_position,
5 * 3 * sizeof(float),
hipMemcpyHostToDevice );
if( err != hipSuccess )
{
      printf( "1 - CUDA copy failed: %s", hipGetErrorString(err) );
abort();
}
//
// Cuda kernel
// size_of_array_ = 100 * 100 * 60
// size_of_array_ /= 64 = 9375
hipLaunchKernelGGL(( process_kernel) , dim3(BLOCKS), dim3(THREADS) , 0, 0, cell_points, /* input */
positions_array_x_, /* already on GPU */
positions_array_y_, /* already on GPU */
positions_array_z_, /* already on GPU */
do_we_have_conductivity_, /* already on GPU */
point_min_distance, /* output */
point_min_distance_index);/* output */
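  //The kernel (declared in CUDA_Conductivity_matching_functions.h) appears to
  //produce one candidate per block: each of the BLOCKS blocks writes its best
  //distance and voxel index for each of the 5 cell points, and the caller is
  //expected to finish the reduction over the (BLOCKS+REMAIN)*5 partial results.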
//
// Copy the array results from GPU to host
err = hipMemcpy( Point_min_distance, point_min_distance,
(BLOCKS+REMAIN) * 5 * sizeof(float),
hipMemcpyDeviceToHost );
if( err != hipSuccess )
{
      printf( "2 - CUDA copy failed: %s", hipGetErrorString(err) );
abort();
}
//
err = hipMemcpy( Point_min_distance_index, point_min_distance_index,
(BLOCKS+REMAIN) * 5 * sizeof(int),
hipMemcpyDeviceToHost );
if( err != hipSuccess )
{
      printf( "3 - CUDA copy failed: %s", hipGetErrorString(err) );
abort();
}
//
//
hipFree(cell_points);
hipFree(point_min_distance);
hipFree(point_min_distance_index);
}
////
////
////
//void
//Domains::CUDA_Conductivity_matching::find_vertices_voxel_index( float* Vertices_position,
// float* Point_min_distance,
// int* Point_min_distance_index)
//{
// //
// //
// hipError_t err;
// float *cell_points;
// float *point_min_distance;
// int *point_min_distance_index;
//
// //
// // Memory allocation on CUDA device
// err = hipMalloc( (void**)&cell_points,
// 5 * 3 * sizeof(float));
// if( err != hipSuccess )
// {
// printf( "CUDA memory allocation failled: %s", hipGetErrorString(err) );
// abort();
// }
// //
// err = hipMalloc( (void**)&point_min_distance,
// BLOCKS * 5 * sizeof(float));
// if( err != hipSuccess )
// {
// printf( "CUDA memory allocation failled: %s", hipGetErrorString(err) );
// abort();
// }
// //
// err = hipMalloc( (void**)&point_min_distance_index,
// BLOCKS * 5 * sizeof(int));
// if( err != hipSuccess )
// {
// printf( "CUDA memory allocation failled: %s", hipGetErrorString(err) );
// abort();
// }
//
//
// //
// // Copy the array on GPU
// err = hipMemcpy( cell_points, Vertices_position,
// 15 * sizeof(float),
// hipMemcpyHostToDevice );
// if( err != hipSuccess )
// {
// printf( "1CUDA copy failled: %s", hipGetErrorString(err) );
// abort();
// }
//
//
// //
// // Cuda kernel
// // size_of_array_ = 100 * 100 * 60
// // size_of_array_ /= 64 = 9375
// hipLaunchKernelGGL(( process_kernel) , dim3(BLOCKS), dim3(THREADS) , 0, 0, cell_points, /* input */
// positions_array_x_, /* already on GPU */
// positions_array_y_, /* already on GPU */
// positions_array_z_, /* already on GPU */
// point_min_distance, /* output */
// point_min_distance_index);/* output */
//
//
// //
// // Copy the array results from GPU to host
// err = hipMemcpy( Point_min_distance, point_min_distance,
// BLOCKS * 5 * sizeof(float),
// hipMemcpyDeviceToHost );
// if( err != hipSuccess )
// {
// printf( "2CUDA copy failled: %s", hipGetErrorString(err) );
// abort();
// }
// //
// err = hipMemcpy( Point_min_distance_index, point_min_distance_index,
// BLOCKS * 5 * sizeof(int),
// hipMemcpyDeviceToHost );
// if( err != hipSuccess )
// {
// printf( "3CUDA copy failled: %s", hipGetErrorString(err) );
// abort();
// }
//
//
// //
// //
// hipFree(cell_points);
// hipFree(point_min_distance);
// hipFree(point_min_distance_index);
//}
//
//
//
std::ostream&
Domains::operator << ( std::ostream& stream,
const Domains::CUDA_Conductivity_matching& that)
{
// std::for_each( that.get_list_position().begin(),
// that.get_list_position().end(),
// [&stream]( int Val )
// {
// stream << "list pos = " << Val << "\n";
// });
// //
// stream << "positions minimum = "
// << that.get_min_x() << " "
// << that.get_min_y() << " "
// << that.get_min_z() << "\n";
// stream << "position y = " << that.get_pos_y() << "\n";
// if ( &that.get_tab() )
// {
// stream << "tab[0] = " << ( &that.get_tab() )[0] << "\n";
// stream << "tab[1] = " << ( &that.get_tab() )[1] << "\n";
// stream << "tab[2] = " << ( &that.get_tab() )[2] << "\n";
// stream << "tab[3] = " << ( &that.get_tab() )[3] << "\n";
// }
//
return stream;
};
| e1642b3068c42e6d6e2a2067026a83f30ddbc965.cu | //
// Project
//
#include "CUDA_Conductivity_matching.h"
#include "CUDA_Conductivity_matching_functions.h"
//
//
//
Domains::CUDA_Conductivity_matching::CUDA_Conductivity_matching():
  positions_array_x_(NULL), positions_array_y_(NULL), positions_array_z_(NULL), do_we_have_conductivity_(NULL)
{
}
//
//
//
Domains::CUDA_Conductivity_matching::CUDA_Conductivity_matching( int Size_of_array,
float* Voxel_center_pos_x,
float* Voxel_center_pos_y,
float* Voxel_center_pos_z,
bool* Do_we_have_conductivity ):
size_of_array_( Size_of_array )
{
//
//
cudaError_t err;
//
// Memory allocation on CUDA device
err = cudaMalloc( (void**)&positions_array_x_,
Size_of_array * sizeof(float));
if( err != cudaSuccess )
{
      printf( "CUDA memory allocation failed: %s", cudaGetErrorString(err) );
abort();
}
//
err = cudaMalloc( (void**)&positions_array_y_,
Size_of_array * sizeof(float));
if( err != cudaSuccess )
{
      printf( "CUDA memory allocation failed: %s", cudaGetErrorString(err) );
abort();
}
//
err = cudaMalloc( (void**)&positions_array_z_,
Size_of_array * sizeof(float));
if( err != cudaSuccess )
{
      printf( "CUDA memory allocation failed: %s", cudaGetErrorString(err) );
abort();
}
//
err = cudaMalloc( (void**)&do_we_have_conductivity_,
Size_of_array * sizeof(bool));
if( err != cudaSuccess )
{
      printf( "CUDA memory allocation failed: %s", cudaGetErrorString(err) );
abort();
}
//
// Copy the array on GPU
err = cudaMemcpy( positions_array_x_, Voxel_center_pos_x, Size_of_array * sizeof(float),
cudaMemcpyHostToDevice);
if( err != cudaSuccess )
{
printf( "CUDA copy failled: %s", cudaGetErrorString(err) );
abort();
}
//
err = cudaMemcpy( positions_array_y_, Voxel_center_pos_y, Size_of_array * sizeof(float),
cudaMemcpyHostToDevice);
if( err != cudaSuccess )
{
printf( "CUDA copy failled: %s", cudaGetErrorString(err) );
abort();
}
//
err = cudaMemcpy( positions_array_z_, Voxel_center_pos_z, Size_of_array * sizeof(float),
cudaMemcpyHostToDevice);
if( err != cudaSuccess )
{
printf( "CUDA copy failled: %s", cudaGetErrorString(err) );
abort();
}
//
err = cudaMemcpy( do_we_have_conductivity_, Do_we_have_conductivity, Size_of_array * sizeof(bool),
cudaMemcpyHostToDevice);
if( err != cudaSuccess )
{
printf( "CUDA copy failled: %s", cudaGetErrorString(err) );
abort();
}
}
// //
// //
// //
// Domains::CUDA_Conductivity_matching( const Domains& that )
// {
// }
// //
// //
// //
// Domains::CUDA_Conductivity_matching( Domains&& that )
// {
// }
//
//
//
Domains::CUDA_Conductivity_matching::~CUDA_Conductivity_matching()
{
cudaFree( positions_array_x_ );
cudaFree( positions_array_y_ );
cudaFree( positions_array_z_ );
// do_we_have_conductivity_ is allocated in the constructor as well; release it too
cudaFree( do_we_have_conductivity_ );
}
// //
// //
// //
// Domains::CUDA_Conductivity_matching&
// Domains::CUDA_Conductivity_matching::operator = ( const Domains& that )
// {
// if ( this != &that )
// {
// // // free existing ressources
// // if( tab_ )
// // {
// // delete [] tab_;
// // tab_ = nullptr;
// // }
// // // allocating new ressources
// // pos_x_ = that.get_pos_x();
// // pos_y_ = that.get_pos_y();
// // list_position_ = that.get_list_position();
// // //
// // tab_ = new int[4];
// // std::copy( &that.get_tab(), &that.get_tab() + 4, tab_ );
// }
// //
// return *this;
// }
// //
// //
// //
// Domains::CUDA_Conductivity_matching&
// Domains::CUDA_Conductivity_matching::operator = ( Domains&& that )
// {
// if( this != &that )
// {
// // // initialisation
// // pos_x_ = 0;
// // pos_y_ = 0;
// // delete [] tab_;
// // tab_ = nullptr;
// // // pilfer the source
// // list_position_ = std::move( that.list_position_ );
// // pos_x_ = that.get_pos_x();
// // pos_y_ = that.get_pos_y();
// // tab_ = &that.get_tab();
// // // reset that
// // that.set_pos_x( 0 );
// // that.set_pos_y( 0 );
// // that.set_tab( nullptr );
// }
// //
// return *this;
// }
//
//
//
void
Domains::CUDA_Conductivity_matching::find_vertices_voxel_index( float* Vertices_position,
float* Point_min_distance,
int* Point_min_distance_index)
{
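// Sketch of what follows (inferred from the buffer sizes used below): the five
// query points in Vertices_position (5 x 3 floats) are copied to the device,
// process_kernel then computes, per block and per query point, the minimum
// distance to the voxel centres (honouring do_we_have_conductivity_), and the
// per-block minima and their indices are copied back; the final reduction over
// blocks is presumably left to the caller.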
//
//
cudaError_t err;
float *cell_points;
float *point_min_distance;
int *point_min_distance_index;
//
// Memory allocation on CUDA device
err = cudaMalloc( (void**)&cell_points,
5 * 3 * sizeof(float));
if( err != cudaSuccess )
{
printf( "CUDA memory allocation failled: %s", cudaGetErrorString(err) );
abort();
}
//
err = cudaMalloc( (void**)&point_min_distance,
(BLOCKS+REMAIN) * 5 * sizeof(float));
if( err != cudaSuccess )
{
printf( "CUDA memory allocation failled: %s", cudaGetErrorString(err) );
abort();
}
//
err = cudaMalloc( (void**)&point_min_distance_index,
(BLOCKS+REMAIN) * 5 * sizeof(int));
if( err != cudaSuccess )
{
printf( "CUDA memory allocation failled: %s", cudaGetErrorString(err) );
abort();
}
//
// Copy the array on GPU
err = cudaMemcpy( cell_points, Vertices_position,
5 * 3 * sizeof(float),
cudaMemcpyHostToDevice );
if( err != cudaSuccess )
{
printf( "1 - CUDA copy failled: %s", cudaGetErrorString(err) );
abort();
}
//
// Cuda kernel
// size_of_array_ = 100 * 100 * 60
// size_of_array_ /= 64 = 9375
process_kernel <<< BLOCKS, THREADS >>> ( cell_points, /* input */
positions_array_x_, /* already on GPU */
positions_array_y_, /* already on GPU */
positions_array_z_, /* already on GPU */
do_we_have_conductivity_, /* already on GPU */
point_min_distance, /* output */
point_min_distance_index);/* output */
//
// Copy the array results from GPU to host
err = cudaMemcpy( Point_min_distance, point_min_distance,
(BLOCKS+REMAIN) * 5 * sizeof(float),
cudaMemcpyDeviceToHost );
if( err != cudaSuccess )
{
printf( "2 - CUDA copy failled: %s", cudaGetErrorString(err) );
abort();
}
//
err = cudaMemcpy( Point_min_distance_index, point_min_distance_index,
(BLOCKS+REMAIN) * 5 * sizeof(int),
cudaMemcpyDeviceToHost );
if( err != cudaSuccess )
{
printf( "3 - CUDA copy failled: %s", cudaGetErrorString(err) );
abort();
}
//
//
cudaFree(cell_points);
cudaFree(point_min_distance);
cudaFree(point_min_distance_index);
}
////
////
////
//void
//Domains::CUDA_Conductivity_matching::find_vertices_voxel_index( float* Vertices_position,
// float* Point_min_distance,
// int* Point_min_distance_index)
//{
// //
// //
// cudaError_t err;
// float *cell_points;
// float *point_min_distance;
// int *point_min_distance_index;
//
// //
// // Memory allocation on CUDA device
// err = cudaMalloc( (void**)&cell_points,
// 5 * 3 * sizeof(float));
// if( err != cudaSuccess )
// {
// printf( "CUDA memory allocation failled: %s", cudaGetErrorString(err) );
// abort();
// }
// //
// err = cudaMalloc( (void**)&point_min_distance,
// BLOCKS * 5 * sizeof(float));
// if( err != cudaSuccess )
// {
// printf( "CUDA memory allocation failled: %s", cudaGetErrorString(err) );
// abort();
// }
// //
// err = cudaMalloc( (void**)&point_min_distance_index,
// BLOCKS * 5 * sizeof(int));
// if( err != cudaSuccess )
// {
// printf( "CUDA memory allocation failled: %s", cudaGetErrorString(err) );
// abort();
// }
//
//
// //
// // Copy the array on GPU
// err = cudaMemcpy( cell_points, Vertices_position,
// 15 * sizeof(float),
// cudaMemcpyHostToDevice );
// if( err != cudaSuccess )
// {
// printf( "1CUDA copy failled: %s", cudaGetErrorString(err) );
// abort();
// }
//
//
// //
// // Cuda kernel
// // size_of_array_ = 100 * 100 * 60
// // size_of_array_ /= 64 = 9375
// process_kernel <<< BLOCKS, THREADS >>> ( cell_points, /* input */
// positions_array_x_, /* already on GPU */
// positions_array_y_, /* already on GPU */
// positions_array_z_, /* already on GPU */
// point_min_distance, /* output */
// point_min_distance_index);/* output */
//
//
// //
// // Copy the array results from GPU to host
// err = cudaMemcpy( Point_min_distance, point_min_distance,
// BLOCKS * 5 * sizeof(float),
// cudaMemcpyDeviceToHost );
// if( err != cudaSuccess )
// {
// printf( "2CUDA copy failled: %s", cudaGetErrorString(err) );
// abort();
// }
// //
// err = cudaMemcpy( Point_min_distance_index, point_min_distance_index,
// BLOCKS * 5 * sizeof(int),
// cudaMemcpyDeviceToHost );
// if( err != cudaSuccess )
// {
// printf( "3CUDA copy failled: %s", cudaGetErrorString(err) );
// abort();
// }
//
//
// //
// //
// cudaFree(cell_points);
// cudaFree(point_min_distance);
// cudaFree(point_min_distance_index);
//}
//
//
//
std::ostream&
Domains::operator << ( std::ostream& stream,
const Domains::CUDA_Conductivity_matching& that)
{
// std::for_each( that.get_list_position().begin(),
// that.get_list_position().end(),
// [&stream]( int Val )
// {
// stream << "list pos = " << Val << "\n";
// });
// //
// stream << "positions minimum = "
// << that.get_min_x() << " "
// << that.get_min_y() << " "
// << that.get_min_z() << "\n";
// stream << "position y = " << that.get_pos_y() << "\n";
// if ( &that.get_tab() )
// {
// stream << "tab[0] = " << ( &that.get_tab() )[0] << "\n";
// stream << "tab[1] = " << ( &that.get_tab() )[1] << "\n";
// stream << "tab[2] = " << ( &that.get_tab() )[2] << "\n";
// stream << "tab[3] = " << ( &that.get_tab() )[3] << "\n";
// }
//
return stream;
};
|
ad28e772bbe0950658f355f66910f3f2788ebb87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__device__ int getTid()
{
int bid = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int tPB = blockDim.x * blockDim.y ;
int fin = bid*tPB+tid;
return fin; // the global thread index was computed but never returned, which is undefined behaviour for a non-void function
}
__global__ void mulElement(int *a ,int *b , int *c , int ha , int wb,int wa)
{
int th = getTid();
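// one thread per output element: 'th' addresses c in row-major order,
// c[row*wb + col] = sum over i of a[row*wa + i] * b[i*wb + col]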
if(th<(ha*wb))
{
int row = th/wb;
int col = th%wb;
int i = 0 , sum = 0;
for(i = 0;i<wa;i++)
{
sum += a[row*wa+i]*b[wb*i+col];
}
c[th] = sum;
}
}
int main(void)
{
int *a,*b,*t,i,j;
int *d_a,*d_b,*d_t;
int ha , wa;
int hb , wb;
printf("Enter the dimensions of first matrix \n ");
scanf("%d %d",&ha,&wa);
printf("Enter the dimensions of second matrix \n");
scanf("%d %d",&hb,&wb);
int size1 = sizeof(int)*ha*wa;
int size2 = sizeof(int)*hb*wb;
int size3 = sizeof(int)*ha*wb;
a = (int*)malloc(ha*wa*sizeof(int));
b = (int*)malloc(hb*wb*sizeof(int));
t = (int*)malloc(ha*wb*sizeof(int));
printf("Enter input matrix 1 : \n");
for(i = 0;i<ha*wa;i++)
scanf("%d",&a[i]);
printf("Enter input matrix 2 : \n");
for(i = 0;i<hb*wb;i++)
scanf("%d",&b[i]);
hipMalloc((void**)&d_a,size1);
hipMalloc((void**)&d_b,size2);
hipMalloc((void**)&d_t,size3);
hipMemcpy(d_a,a,size1,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,size2,hipMemcpyHostToDevice);
int gx,gy,bx,by;
printf("Enter the dimension of the grid \n");
scanf("%d %d",&gx,&gy);
bx = ceil((double)ha/gx);
by = ceil((double)wb/gy);
printf("The dimensions of block are : \n %d %d \n",bx,by);
dim3 grid(gx,gy);
dim3 block(bx,by);
hipLaunchKernelGGL(( mulElement), dim3(grid),dim3(block), 0, 0, d_a,d_b,d_t,ha,wb,wa);
hipMemcpy(t,d_t,size3,hipMemcpyDeviceToHost);
printf("Result vector is :\n");
for(i = 0;i<ha;i++)
{
for(j = 0;j<wb;j++)
printf("%d ",t[i*wb+j]);
printf("\n");
}
getchar();
hipFree(d_a);
hipFree(d_b); // d_b was allocated above but never released
hipFree(d_t);
free(a);
free(b);
free(t);
return 0;
} | ad28e772bbe0950658f355f66910f3f2788ebb87.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__device__ int getTid()
{
int bid = blockIdx.y * gridDim.x + blockIdx.x;
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int tPB = blockDim.x * blockDim.y ;
int fin = bid*tPB+tid;
return fin; // the global thread index was computed but never returned, which is undefined behaviour for a non-void function
}
__global__ void mulElement(int *a ,int *b , int *c , int ha , int wb,int wa)
{
int th = getTid();
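// one thread per output element: 'th' addresses c in row-major order,
// c[row*wb + col] = sum over i of a[row*wa + i] * b[i*wb + col]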
if(th<(ha*wb))
{
int row = th/wb;
int col = th%wb;
int i = 0 , sum = 0;
for(i = 0;i<wa;i++)
{
sum += a[row*wa+i]*b[wb*i+col];
}
c[th] = sum;
}
}
int main(void)
{
int *a,*b,*t,i,j;
int *d_a,*d_b,*d_t;
int ha , wa;
int hb , wb;
printf("Enter the dimensions of first matrix \n ");
scanf("%d %d",&ha,&wa);
printf("Enter the dimensions of second matrix \n");
scanf("%d %d",&hb,&wb);
int size1 = sizeof(int)*ha*wa;
int size2 = sizeof(int)*hb*wb;
int size3 = sizeof(int)*ha*wb;
a = (int*)malloc(ha*wa*sizeof(int));
b = (int*)malloc(hb*wb*sizeof(int));
t = (int*)malloc(ha*wb*sizeof(int));
printf("Enter input matrix 1 : \n");
for(i = 0;i<ha*wa;i++)
scanf("%d",&a[i]);
printf("Enter input matrix 2 : \n");
for(i = 0;i<hb*wb;i++)
scanf("%d",&b[i]);
cudaMalloc((void**)&d_a,size1);
cudaMalloc((void**)&d_b,size2);
cudaMalloc((void**)&d_t,size3);
cudaMemcpy(d_a,a,size1,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size2,cudaMemcpyHostToDevice);
int gx,gy,bx,by;
printf("Enter the dimension of the grid \n");
scanf("%d %d",&gx,&gy);
bx = ceil((double)ha/gx);
by = ceil((double)wb/gy);
printf("The dimensions of block are : \n %d %d \n",bx,by);
dim3 grid(gx,gy);
dim3 block(bx,by);
mulElement<<<grid,block>>>(d_a,d_b,d_t,ha,wb,wa);
cudaMemcpy(t,d_t,size3,cudaMemcpyDeviceToHost);
printf("Result vector is :\n");
for(i = 0;i<ha;i++)
{
for(j = 0;j<wb;j++)
printf("%d ",t[i*wb+j]);
printf("\n");
}
getchar();
cudaFree(d_a);
cudaFree(d_b); // d_b was allocated above but never released
cudaFree(d_t);
free(a);
free(b);
free(t);
return 0;
} |
e8ffd6b07da935082baef19959cd39eeb24acda8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// @file fdwt53.cu
/// @brief CUDA implementation of forward 5/3 2D DWT.
/// @author Martin Jirman ([email protected])
/// @date 2011-02-04 13:23
///
///
/// Copyright (c) 2011 Martin Jirman
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright
/// notice, this list of conditions and the following disclaimer in the
/// documentation and/or other materials provided with the distribution.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include "common.h"
#include "transform_buffer.h"
#include "io.h"
namespace dwt_cuda {
/// Wraps buffer and methods needed for computing one level of 5/3 FDWT
/// using sliding window approach.
/// @tparam WIN_SIZE_X width of sliding window
/// @tparam WIN_SIZE_Y height of sliding window
template <int WIN_SIZE_X, int WIN_SIZE_Y>
class FDWT53 {
private:
/// Info needed for processing of one input column.
/// @tparam CHECKED_LOADER true if column's loader should check boundaries
/// false if there are no nearby boundaries to check
template <bool CHECKED_LOADER>
struct FDWT53Column {
/// loader for the column
VerticalDWTPixelLoader<int, CHECKED_LOADER> loader;
/// offset of the column in shared buffer
int offset;
// backup of first 3 loaded pixels (not transformed)
int pixel0, pixel1, pixel2;
/// Sets all fields to anything to prevent 'uninitialized' warnings.
__device__ void clear() {
offset = pixel0 = pixel1 = pixel2 = 0;
loader.clear();
}
};
/// Type of shared memory buffer for 5/3 FDWT transforms.
typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> FDWT53Buffer;
/// Actual shared buffer used for forward 5/3 DWT.
FDWT53Buffer buffer;
/// Difference between indices of two vertical neighbors in buffer.
enum { STRIDE = FDWT53Buffer::VERTICAL_STRIDE };
/// Forward 5/3 DWT predict operation.
struct Forward53Predict {
__device__ void operator() (const int p, int & c, const int n) const {
// c = n;
c -= (p + n) / 2; // F.8, page 126, ITU-T Rec. T.800 final draft the real one
}
};
/// Forward 5/3 DWT update operation.
struct Forward53Update {
__device__ void operator() (const int p, int & c, const int n) const {
c += (p + n + 2) / 4; // F.9, page 126, ITU-T Rec. T.800 final draft
}
};
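// The two functors above are the integer lifting steps of the reversible 5/3
// transform (F.8 / F.9, ITU-T Rec. T.800), applied in place:
// predict (odd sample c, even neighbours p and n): c -= (p + n) / 2
// update (even sample c, odd neighbours p and n): c += (p + n + 2) / 4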
/// Initializes one column: computes offset of the column in shared memory
/// buffer, initializes loader and finally uses it to load first 3 pixels.
/// @tparam CHECKED true if loader of the column checks boundaries
/// @param column (uninitialized) column info to be initialized
/// @param input input image
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param colIndex x-axis coordinate of the column (relative to the left
/// side of this threadblock's block of input pixels)
/// @param firstY y-axis coordinate of first image row to be transformed
template <bool CHECKED>
__device__ void initColumn(FDWT53Column<CHECKED> & column,
const int * const input,
const int sizeX, const int sizeY,
const int colIndex, const int firstY) {
// get offset of the column with index 'cId'
column.offset = buffer.getColumnOffset(colIndex);
// coordinates of the first pixel to be loaded
const int firstX = blockIdx.x * WIN_SIZE_X + colIndex;
if(blockIdx.y == 0) {
// topmost block - apply mirroring rules when loading first 3 rows
column.loader.init(sizeX, sizeY, firstX, firstY);
// load pixels in mirrored way
column.pixel2 = column.loader.loadFrom(input); // loaded pixel #0
column.pixel1 = column.loader.loadFrom(input); // loaded pixel #1
column.pixel0 = column.loader.loadFrom(input); // loaded pixel #2
// reinitialize loader to start with pixel #1 again
column.loader.init(sizeX, sizeY, firstX, firstY + 1);
} else {
// non-topmost row - regular loading:
column.loader.init(sizeX, sizeY, firstX, firstY - 2);
// load 3 rows into the column
column.pixel0 = column.loader.loadFrom(input);
column.pixel1 = column.loader.loadFrom(input);
column.pixel2 = column.loader.loadFrom(input);
// Now, the next pixel, which will be loaded by loader, is pixel #1.
}
}
/// Loads and vertically transforms given column. Assumes that first 3
/// pixels are already loaded in column fields pixel0 ... pixel2.
/// @tparam CHECKED true if loader of the column checks boundaries
/// @param column column to be loaded and vertically transformed
/// @param input pointer to input image data
template <bool CHECKED>
__device__ void loadAndVerticallyTransform(FDWT53Column<CHECKED> & column,
const int * const input) {
// take 3 loaded pixels and put them into shared memory transform buffer
buffer[column.offset + 0 * STRIDE] = column.pixel0;
buffer[column.offset + 1 * STRIDE] = column.pixel1;
buffer[column.offset + 2 * STRIDE] = column.pixel2;
// load remaining pixels to be able to vertically transform the window
for(int i = 3; i < (3 + WIN_SIZE_Y); i++)
{
buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input);
}
// remember last 3 pixels for use in next iteration
column.pixel0 = buffer[column.offset + (WIN_SIZE_Y + 0) * STRIDE];
column.pixel1 = buffer[column.offset + (WIN_SIZE_Y + 1) * STRIDE];
column.pixel2 = buffer[column.offset + (WIN_SIZE_Y + 2) * STRIDE];
// vertically transform the column in transform buffer
buffer.forEachVerticalOdd(column.offset, Forward53Predict());
buffer.forEachVerticalEven(column.offset, Forward53Update());
}
/// Actual implementation of 5/3 FDWT.
/// @tparam CHECK_LOADS true if input loader must check boundaries
/// @tparam CHECK_WRITES true if output writer must check boundaries
/// @param in input image
/// @param out output buffer
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param winSteps number of sliding window steps
template <bool CHECK_LOADS, bool CHECK_WRITES>
__device__ void transform(const int * const in, int * const out,
const int sizeX, const int sizeY,
const int winSteps) {
// info about one main and one boundary columns processed by this thread
FDWT53Column<CHECK_LOADS> column;
FDWT53Column<CHECK_LOADS> boundaryColumn; // only few threads use this
// Initialize all column info: initialize loaders, compute offset of
// column in shared buffer and initialize loader of column.
const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps;
initColumn(column, in, sizeX, sizeY, threadIdx.x, firstY); //has been checked Mar 9th
// first 3 threads initialize boundary columns, others do not use them
boundaryColumn.clear();
if(threadIdx.x < 3) {
// index of boundary column (relative x-axis coordinate of the column)
const int colId = threadIdx.x + ((threadIdx.x == 0) ? WIN_SIZE_X : -3);
// initialize the column
initColumn(boundaryColumn, in, sizeX, sizeY, colId, firstY);
}
// index of column which will be written into output by this thread
const int outColumnIndex = parityIdx<WIN_SIZE_X>();
// offset of column which will be written by this thread into output
const int outColumnOffset = buffer.getColumnOffset(outColumnIndex);
// initialize output writer for this thread
const int outputFirstX = blockIdx.x * WIN_SIZE_X + outColumnIndex;
VerticalDWTBandWriter<int, CHECK_WRITES> writer;
writer.init(sizeX, sizeY, outputFirstX, firstY);
// Sliding window iterations:
// Each iteration assumes that first 3 pixels of each column are loaded.
for(int w = 0; w < winSteps; w++) {
// For each column (including boundary columns): load and vertically
// transform another WIN_SIZE_Y lines.
loadAndVerticallyTransform(column, in);
if(threadIdx.x < 3) {
loadAndVerticallyTransform(boundaryColumn, in);
}
// wait for all columns to be vertically transformed and transform all
// output rows horizontally
__syncthreads();
buffer.forEachHorizontalOdd(2, WIN_SIZE_Y, Forward53Predict());
__syncthreads();
buffer.forEachHorizontalEven(2, WIN_SIZE_Y, Forward53Update());
// wait for all output rows to be transformed horizontally and write
// them into output buffer
__syncthreads();
for(int r = 2; r < (2 + WIN_SIZE_Y); r += 2) {
// Write low coefficients from output column into low band ...
writer.writeLowInto(out, buffer[outColumnOffset + r * STRIDE]);
// ... and high coeficients into the high band.
writer.writeHighInto(out, buffer[outColumnOffset + (r+1) * STRIDE]);
}
// before proceeding to next iteration, wait for all output columns
// to be written into the output
__syncthreads();
}
}
public:
/// Determines, whether this block's pixels touch boundary and selects
/// right version of algorithm according to it - for many threadblocks, it
/// selects version which does not deal with boundary mirroring and thus is
/// slightly faster.
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
/// @param steps number of sliding window steps
__device__ static void run(const int * const in, int * const out,
const int sx, const int sy, const int steps) {
// if(blockIdx.x==0 && blockIdx.y ==11 && threadIdx.x >=0&&threadIdx.x <64){
// object with transform buffer in shared memory
__shared__ FDWT53<WIN_SIZE_X, WIN_SIZE_Y> fdwt53;
// Compute limits of this threadblock's block of pixels and use them to
// determine, whether this threadblock will have to deal with boundary.
// (1 in next expressions is for radius of impulse response of 5/3 FDWT.)
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1;
const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1;
const bool atRightBoudary = maxX >= sx;
const bool atBottomBoudary = maxY >= sy;
// Select specialized version of code according to distance of this
// threadblock's pixels from image boundary.
if(atBottomBoudary)
{
// near bottom boundary => check both writing and reading
fdwt53.transform<true, true>(in, out, sx, sy, steps);
} else if(atRightBoudary)
{
// near right boundary only => check writing only
fdwt53.transform<false, true>(in, out, sx, sy, steps);
} else
{
// no nearby boundary => check nothing
fdwt53.transform<false, false>(in, out, sx, sy, steps);
}
}
// }
}; // end of class FDWT53
/// Main GPU 5/3 FDWT entry point.
/// @tparam WIN_SX width of sliding window to be used
/// @tparam WIN_SY height of sliding window to be used
/// @param input input image
/// @param output output buffer
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param winSteps number of sliding window steps
template <int WIN_SX, int WIN_SY>
__launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(FDWT53<WIN_SX, WIN_SY>), 8))
__global__ void fdwt53Kernel(const int * const input, int * const output,
const int sizeX, const int sizeY,
const int winSteps) {
FDWT53<WIN_SX, WIN_SY>::run(input, output, sizeX, sizeY, winSteps);
}
/// Only computes optimal number of sliding window steps,
/// number of threadblocks and then lanches the 5/3 FDWT kernel.
/// @tparam WIN_SX width of sliding window
/// @tparam WIN_SY height of sliding window
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
template <int WIN_SX, int WIN_SY>
void launchFDWT53Kernel (int * in, int * out, int sx, int sy) {
// compute optimal number of steps of each sliding window
const int steps = divRndUp(sy, 15 * WIN_SY);
int gx = divRndUp(sx, WIN_SX);
int gy = divRndUp(sy, WIN_SY * steps);
printf("\n sliding steps = %d , gx = %d , gy = %d \n", steps, gx, gy);
// prepare grid size
dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps));
// printf("\n globalx=%d, globaly=%d, blocksize=%d\n", gSize.x, gSize.y, WIN_SX);
// run kernel, possibly measure time and finally check the call
// PERF_BEGIN
hipLaunchKernelGGL(( fdwt53Kernel<WIN_SX, WIN_SY>), dim3(gSize), dim3(WIN_SX), 0, 0, in, out, sx, sy, steps);
// PERF_END(" FDWT53", sx, sy)
// CudaDWTTester::checkLastKernelCall("FDWT 5/3 kernel");
printf("fdwt53Kernel in launchFDWT53Kernel has finished");
}
/// Forward 5/3 2D DWT. See common rules (above) for more details.
/// @param in Expected to be normalized into range [-128, 127].
/// Will not be preserved (will be overwritten).
/// @param out output buffer on GPU
/// @param sizeX width of input image (in pixels)
/// @param sizeY height of input image (in pixels)
/// @param levels number of recursive DWT levels
void fdwt53(int * in, int * out, int sizeX, int sizeY, int levels) {
// select right width of kernel for the size of the image
if(sizeX >= 960) {
launchFDWT53Kernel<192, 8>(in, out, sizeX, sizeY);
} else if (sizeX >= 480) {
launchFDWT53Kernel<128, 8>(in, out, sizeX, sizeY);
} else {
launchFDWT53Kernel<64, 8>(in, out, sizeX, sizeY);
}
// if this was not the last level, continue recursively with other levels
if(levels > 1) {
// copy output's LL band back into input buffer
const int llSizeX = divRndUp(sizeX, 2);
const int llSizeY = divRndUp(sizeY, 2);
// printf("\n llSizeX = %d , llSizeY = %d \n", llSizeX, llSizeY);
memCopy(in, out, llSizeX, llSizeY); //the function memCopy in cuda_dwt/common.h line 238
// run remaining levels of FDWT
fdwt53(in, out, llSizeX, llSizeY, levels - 1);
}
}
} // end of namespace dwt_cuda
| e8ffd6b07da935082baef19959cd39eeb24acda8.cu | /// @file fdwt53.cu
/// @brief CUDA implementation of forward 5/3 2D DWT.
/// @author Martin Jirman ([email protected])
/// @date 2011-02-04 13:23
///
///
/// Copyright (c) 2011 Martin Jirman
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright
/// notice, this list of conditions and the following disclaimer in the
/// documentation and/or other materials provided with the distribution.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include "common.h"
#include "transform_buffer.h"
#include "io.h"
namespace dwt_cuda {
/// Wraps buffer and methods needed for computing one level of 5/3 FDWT
/// using sliding window approach.
/// @tparam WIN_SIZE_X width of sliding window
/// @tparam WIN_SIZE_Y height of sliding window
template <int WIN_SIZE_X, int WIN_SIZE_Y>
class FDWT53 {
private:
/// Info needed for processing of one input column.
/// @tparam CHECKED_LOADER true if column's loader should check boundaries
/// false if there are no nearby boundaries to check
template <bool CHECKED_LOADER>
struct FDWT53Column {
/// loader for the column
VerticalDWTPixelLoader<int, CHECKED_LOADER> loader;
/// offset of the column in shared buffer
int offset;
// backup of first 3 loaded pixels (not transformed)
int pixel0, pixel1, pixel2;
/// Sets all fields to anything to prevent 'uninitialized' warnings.
__device__ void clear() {
offset = pixel0 = pixel1 = pixel2 = 0;
loader.clear();
}
};
/// Type of shared memory buffer for 5/3 FDWT transforms.
typedef TransformBuffer<int, WIN_SIZE_X, WIN_SIZE_Y + 3, 2> FDWT53Buffer;
/// Actual shared buffer used for forward 5/3 DWT.
FDWT53Buffer buffer;
/// Difference between indices of two vertical neighbors in buffer.
enum { STRIDE = FDWT53Buffer::VERTICAL_STRIDE };
/// Forward 5/3 DWT predict operation.
struct Forward53Predict {
__device__ void operator() (const int p, int & c, const int n) const {
// c = n;
c -= (p + n) / 2; // F.8, page 126, ITU-T Rec. T.800 final draft the real one
}
};
/// Forward 5/3 DWT update operation.
struct Forward53Update {
__device__ void operator() (const int p, int & c, const int n) const {
c += (p + n + 2) / 4; // F.9, page 126, ITU-T Rec. T.800 final draft
}
};
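// The two functors above are the integer lifting steps of the reversible 5/3
// transform (F.8 / F.9, ITU-T Rec. T.800), applied in place:
// predict (odd sample c, even neighbours p and n): c -= (p + n) / 2
// update (even sample c, odd neighbours p and n): c += (p + n + 2) / 4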
/// Initializes one column: computes offset of the column in shared memory
/// buffer, initializes loader and finally uses it to load first 3 pixels.
/// @tparam CHECKED true if loader of the column checks boundaries
/// @param column (uninitialized) column info to be initialized
/// @param input input image
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param colIndex x-axis coordinate of the column (relative to the left
/// side of this threadblock's block of input pixels)
/// @param firstY y-axis coordinate of first image row to be transformed
template <bool CHECKED>
__device__ void initColumn(FDWT53Column<CHECKED> & column,
const int * const input,
const int sizeX, const int sizeY,
const int colIndex, const int firstY) {
// get offset of the column with index 'cId'
column.offset = buffer.getColumnOffset(colIndex);
// coordinates of the first pixel to be loaded
const int firstX = blockIdx.x * WIN_SIZE_X + colIndex;
if(blockIdx.y == 0) {
// topmost block - apply mirroring rules when loading first 3 rows
column.loader.init(sizeX, sizeY, firstX, firstY);
// load pixels in mirrored way
column.pixel2 = column.loader.loadFrom(input); // loaded pixel #0
column.pixel1 = column.loader.loadFrom(input); // loaded pixel #1
column.pixel0 = column.loader.loadFrom(input); // loaded pixel #2
// reinitialize loader to start with pixel #1 again
column.loader.init(sizeX, sizeY, firstX, firstY + 1);
} else {
// non-topmost row - regular loading:
column.loader.init(sizeX, sizeY, firstX, firstY - 2);
// load 3 rows into the column
column.pixel0 = column.loader.loadFrom(input);
column.pixel1 = column.loader.loadFrom(input);
column.pixel2 = column.loader.loadFrom(input);
// Now, the next pixel, which will be loaded by loader, is pixel #1.
}
}
/// Loads and vertically transforms given column. Assumes that first 3
/// pixels are already loaded in column fields pixel0 ... pixel2.
/// @tparam CHECKED true if loader of the column checks boundaries
/// @param column column to be loaded and vertically transformed
/// @param input pointer to input image data
template <bool CHECKED>
__device__ void loadAndVerticallyTransform(FDWT53Column<CHECKED> & column,
const int * const input) {
// take 3 loaded pixels and put them into shared memory transform buffer
buffer[column.offset + 0 * STRIDE] = column.pixel0;
buffer[column.offset + 1 * STRIDE] = column.pixel1;
buffer[column.offset + 2 * STRIDE] = column.pixel2;
// load remaining pixels to be able to vertically transform the window
for(int i = 3; i < (3 + WIN_SIZE_Y); i++)
{
buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input);
}
// remember last 3 pixels for use in next iteration
column.pixel0 = buffer[column.offset + (WIN_SIZE_Y + 0) * STRIDE];
column.pixel1 = buffer[column.offset + (WIN_SIZE_Y + 1) * STRIDE];
column.pixel2 = buffer[column.offset + (WIN_SIZE_Y + 2) * STRIDE];
// vertically transform the column in transform buffer
buffer.forEachVerticalOdd(column.offset, Forward53Predict());
buffer.forEachVerticalEven(column.offset, Forward53Update());
}
/// Actual implementation of 5/3 FDWT.
/// @tparam CHECK_LOADS true if input loader must check boundaries
/// @tparam CHECK_WRITES true if output writer must check boundaries
/// @param in input image
/// @param out output buffer
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param winSteps number of sliding window steps
template <bool CHECK_LOADS, bool CHECK_WRITES>
__device__ void transform(const int * const in, int * const out,
const int sizeX, const int sizeY,
const int winSteps) {
// info about one main and one boundary columns processed by this thread
FDWT53Column<CHECK_LOADS> column;
FDWT53Column<CHECK_LOADS> boundaryColumn; // only few threads use this
// Initialize all column info: initialize loaders, compute offset of
// column in shared buffer and initialize loader of column.
const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps;
initColumn(column, in, sizeX, sizeY, threadIdx.x, firstY); //has been checked Mar 9th
// first 3 threads initialize boundary columns, others do not use them
boundaryColumn.clear();
if(threadIdx.x < 3) {
// index of boundary column (relative x-axis coordinate of the column)
const int colId = threadIdx.x + ((threadIdx.x == 0) ? WIN_SIZE_X : -3);
// initialize the column
initColumn(boundaryColumn, in, sizeX, sizeY, colId, firstY);
}
// index of column which will be written into output by this thread
const int outColumnIndex = parityIdx<WIN_SIZE_X>();
// offset of column which will be written by this thread into output
const int outColumnOffset = buffer.getColumnOffset(outColumnIndex);
// initialize output writer for this thread
const int outputFirstX = blockIdx.x * WIN_SIZE_X + outColumnIndex;
VerticalDWTBandWriter<int, CHECK_WRITES> writer;
writer.init(sizeX, sizeY, outputFirstX, firstY);
// Sliding window iterations:
// Each iteration assumes that first 3 pixels of each column are loaded.
for(int w = 0; w < winSteps; w++) {
// For each column (including boundary columns): load and vertically
// transform another WIN_SIZE_Y lines.
loadAndVerticallyTransform(column, in);
if(threadIdx.x < 3) {
loadAndVerticallyTransform(boundaryColumn, in);
}
// wait for all columns to be vertically transformed and transform all
// output rows horizontally
__syncthreads();
buffer.forEachHorizontalOdd(2, WIN_SIZE_Y, Forward53Predict());
__syncthreads();
buffer.forEachHorizontalEven(2, WIN_SIZE_Y, Forward53Update());
// wait for all output rows to be transformed horizontally and write
// them into output buffer
__syncthreads();
for(int r = 2; r < (2 + WIN_SIZE_Y); r += 2) {
// Write low coefficients from output column into low band ...
writer.writeLowInto(out, buffer[outColumnOffset + r * STRIDE]);
// ... and high coeficients into the high band.
writer.writeHighInto(out, buffer[outColumnOffset + (r+1) * STRIDE]);
}
// before proceeding to next iteration, wait for all output columns
// to be written into the output
__syncthreads();
}
}
public:
/// Determines, whether this block's pixels touch boundary and selects
/// right version of algorithm according to it - for many threadblocks, it
/// selects version which does not deal with boundary mirroring and thus is
/// slightly faster.
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
/// @param steps number of sliding window steps
__device__ static void run(const int * const in, int * const out,
const int sx, const int sy, const int steps) {
// if(blockIdx.x==0 && blockIdx.y ==11 && threadIdx.x >=0&&threadIdx.x <64){
// object with transform buffer in shared memory
__shared__ FDWT53<WIN_SIZE_X, WIN_SIZE_Y> fdwt53;
// Compute limits of this threadblock's block of pixels and use them to
// determine, whether this threadblock will have to deal with boundary.
// (1 in next expressions is for radius of impulse response of 5/3 FDWT.)
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1;
const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1;
const bool atRightBoudary = maxX >= sx;
const bool atBottomBoudary = maxY >= sy;
// Select specialized version of code according to distance of this
// threadblock's pixels from image boundary.
if(atBottomBoudary)
{
// near bottom boundary => check both writing and reading
fdwt53.transform<true, true>(in, out, sx, sy, steps);
} else if(atRightBoudary)
{
// near right boundary only => check writing only
fdwt53.transform<false, true>(in, out, sx, sy, steps);
} else
{
// no nearby boundary => check nothing
fdwt53.transform<false, false>(in, out, sx, sy, steps);
}
}
// }
}; // end of class FDWT53
/// Main GPU 5/3 FDWT entry point.
/// @tparam WIN_SX width of sliding window to be used
/// @tparam WIN_SY height of sliding window to be used
/// @param input input image
/// @param output output buffer
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param winSteps number of sliding window steps
template <int WIN_SX, int WIN_SY>
__launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(FDWT53<WIN_SX, WIN_SY>), 8))
__global__ void fdwt53Kernel(const int * const input, int * const output,
const int sizeX, const int sizeY,
const int winSteps) {
FDWT53<WIN_SX, WIN_SY>::run(input, output, sizeX, sizeY, winSteps);
}
/// Only computes optimal number of sliding window steps,
/// number of threadblocks and then lanches the 5/3 FDWT kernel.
/// @tparam WIN_SX width of sliding window
/// @tparam WIN_SY height of sliding window
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
template <int WIN_SX, int WIN_SY>
void launchFDWT53Kernel (int * in, int * out, int sx, int sy) {
// compute optimal number of steps of each sliding window
const int steps = divRndUp(sy, 15 * WIN_SY);
int gx = divRndUp(sx, WIN_SX);
int gy = divRndUp(sy, WIN_SY * steps);
printf("\n sliding steps = %d , gx = %d , gy = %d \n", steps, gx, gy);
// prepare grid size
dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps));
// printf("\n globalx=%d, globaly=%d, blocksize=%d\n", gSize.x, gSize.y, WIN_SX);
// run kernel, possibly measure time and finally check the call
// PERF_BEGIN
fdwt53Kernel<WIN_SX, WIN_SY><<<gSize, WIN_SX>>>(in, out, sx, sy, steps);
// PERF_END(" FDWT53", sx, sy)
// CudaDWTTester::checkLastKernelCall("FDWT 5/3 kernel");
printf("fdwt53Kernel in launchFDWT53Kernel has finished");
}
/// Forward 5/3 2D DWT. See common rules (above) for more details.
/// @param in Expected to be normalized into range [-128, 127].
/// Will not be preserved (will be overwritten).
/// @param out output buffer on GPU
/// @param sizeX width of input image (in pixels)
/// @param sizeY height of input image (in pixels)
/// @param levels number of recursive DWT levels
void fdwt53(int * in, int * out, int sizeX, int sizeY, int levels) {
// select right width of kernel for the size of the image
if(sizeX >= 960) {
launchFDWT53Kernel<192, 8>(in, out, sizeX, sizeY);
} else if (sizeX >= 480) {
launchFDWT53Kernel<128, 8>(in, out, sizeX, sizeY);
} else {
launchFDWT53Kernel<64, 8>(in, out, sizeX, sizeY);
}
// if this was not the last level, continue recursively with other levels
if(levels > 1) {
// copy output's LL band back into input buffer
const int llSizeX = divRndUp(sizeX, 2);
const int llSizeY = divRndUp(sizeY, 2);
// printf("\n llSizeX = %d , llSizeY = %d \n", llSizeX, llSizeY);
memCopy(in, out, llSizeX, llSizeY); //the function memCopy in cuda_dwt/common.h line 238
// run remaining levels of FDWT
fdwt53(in, out, llSizeX, llSizeY, levels - 1);
}
}
} // end of namespace dwt_cuda
|
92a3e755c8882577381280b8d6800e490921e4ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/book.h"
int main(void) {
hipDeviceProp_t prop;
int dev;
HANDLE_ERROR(hipGetDevice(&dev));
printf("ID of current CUDA device: %d\n", dev);
memset(&prop, 0, sizeof(hipDeviceProp_t));
prop.major = 7;
prop.minor = 5;
HANDLE_ERROR(hipChooseDevice(&dev, &prop));
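// hipChooseDevice fills 'dev' with the id of the installed device whose
// properties most closely match the requested ones (here: compute capability 7.5)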
printf("ID of CUDA device closest to revision %d.%d: %d\n", prop.major,
prop.minor, dev);
HANDLE_ERROR(hipSetDevice(dev));
} | 92a3e755c8882577381280b8d6800e490921e4ba.cu | #include "../common/book.h"
int main(void) {
cudaDeviceProp prop;
int dev;
HANDLE_ERROR(cudaGetDevice(&dev));
printf("ID of current CUDA device: %d\n", dev);
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 7;
prop.minor = 5;
HANDLE_ERROR(cudaChooseDevice(&dev, &prop));
printf("ID of CUDA device closest to revision %d.%d: %d\n", prop.major,
prop.minor, dev);
HANDLE_ERROR(cudaSetDevice(dev));
} |
448f421c6dae993f8c8031840f571ef3109e6bd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* CUDA Kernel functions.
*
* Notes:
*
* * use `-1` as *null pointer*
* * last-level cell and then attempts to lock the appropriate child pointer (an array index) by writing an
otherwise unused value (-2) to it using an atomic operation
*/
#include "../include/Kernels.cuh"
//__device__ const int blockSize = 256; //256;
//extern __shared__ float buffer[];
//__device__ const int warp = 32;
//__device__ const int stackSize = 64;
__device__ const float eps_squared = 0.0025;
__device__ const float theta = 1.5; //0.5; //1.5; //0.5;
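// eps_squared is the (squared) gravitational softening length and theta the
// Barnes-Hut opening-angle parameter used in the force computation: a cell is
// accepted as a single pseudo-particle once its size is small enough relative
// to its distance, compared against theta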
__global__ void resetArraysKernel(int *mutex, float *x, float *y, float *z, float *mass, int *count, int *start,
int *sorted, int *child, int *index, float *minX, float *maxX,
float *minY, float *maxY, float *minZ, float *maxZ, int n, int m,
int *procCounter, int *procCounterTemp) {
int bodyIndex = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
// reset quadtree arrays
while(bodyIndex + offset < m) {
#pragma unroll 8
for (int i=0; i<8; i++) {
child[(bodyIndex + offset)*8 + i] = -1;
}
if (bodyIndex + offset < n) {
count[bodyIndex + offset] = 1;
}
else {
x[bodyIndex + offset] = 0;
y[bodyIndex + offset] = 0;
z[bodyIndex + offset] = 0;
mass[bodyIndex + offset] = 0;
count[bodyIndex + offset] = 0;
}
start[bodyIndex + offset] = -1;
sorted[bodyIndex + offset] = 0;
offset += stride;
}
// reset quadtree pointers
if (bodyIndex == 0) {
*mutex = 0;
*index = n;
*minX = 0;
*maxX = 0;
*minY = 0;
*maxY = 0;
*minZ = 0;
*maxZ = 0;
procCounter[0] = 0;
procCounter[1] = 0;
procCounterTemp[0] = 0;
procCounterTemp[1] = 0;
}
}
__global__ void resetArraysParallelKernel(int *domainListIndex, unsigned long *domainListKeys,
int *domainListIndices, int *domainListLevels,
int *lowestDomainListIndices, int *lowestDomainListIndex,
unsigned long *lowestDomainListKeys, unsigned long *sortedLowestDomainListKeys,
float *tempArray, int *to_delete_cell, int *to_delete_leaf, int n, int m) {
int bodyIndex = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < n) {
tempArray[bodyIndex + offset] = 0;
if ((bodyIndex + offset) < DOMAIN_LIST_SIZE) {
domainListLevels[bodyIndex + offset] = -1;
domainListKeys[bodyIndex + offset] = KEY_MAX;
domainListIndices[bodyIndex + offset] = -1;
lowestDomainListIndices[bodyIndex + offset] = -1;
lowestDomainListKeys[bodyIndex + offset] = KEY_MAX;
sortedLowestDomainListKeys[bodyIndex + offset] = KEY_MAX;
offset += stride;
}
offset += stride;
}
if (bodyIndex == 0) {
*domainListIndex = 0;
*lowestDomainListIndex = 0;
to_delete_cell[0] = -1;
to_delete_cell[1] = -1;
to_delete_leaf[0] = -1;
to_delete_leaf[1] = -1;
}
}
// Kernel 1: computes bounding box around all bodies
__global__ void computeBoundingBoxKernel(int *mutex, float *x, float *y, float *z, float *minX, float *maxX,
float *minY, float *maxY, float *minZ, float *maxZ, int n, int blockSize)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
// initialize local min/max
float x_min = x[index];
float x_max = x[index];
float y_min = y[index];
float y_max = y[index];
float z_min = z[index];
float z_max = z[index];
extern __shared__ float buffer[];
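// six per-block reduction buffers of blockSize floats each are carved out of
// the dynamic shared memory, so the kernel has to be launched with
// 6 * blockSize * sizeof(float) bytes of dynamic shared memory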
float* x_min_buffer = (float*)buffer;
float* x_max_buffer = (float*)&x_min_buffer[blockSize];
float* y_min_buffer = (float*)&x_max_buffer[blockSize];
float* y_max_buffer = (float*)&y_min_buffer[blockSize];
float* z_min_buffer = (float*)&y_max_buffer[blockSize];
float* z_max_buffer = (float*)&z_min_buffer[blockSize];
int offset = stride;
// find (local) min/max
while (index + offset < n) {
x_min = fminf(x_min, x[index + offset]);
x_max = fmaxf(x_max, x[index + offset]);
y_min = fminf(y_min, y[index + offset]);
y_max = fmaxf(y_max, y[index + offset]);
z_min = fminf(z_min, z[index + offset]);
z_max = fmaxf(z_max, z[index + offset]);
offset += stride;
}
// save value in corresponding buffer
x_min_buffer[threadIdx.x] = x_min;
x_max_buffer[threadIdx.x] = x_max;
y_min_buffer[threadIdx.x] = y_min;
y_max_buffer[threadIdx.x] = y_max;
z_min_buffer[threadIdx.x] = z_min;
z_max_buffer[threadIdx.x] = z_max;
// synchronize threads / wait for unfinished threads
__syncthreads();
int i = blockDim.x/2; // assuming blockDim.x is a power of 2!
// reduction within block
while (i != 0) {
if (threadIdx.x < i) {
x_min_buffer[threadIdx.x] = fminf(x_min_buffer[threadIdx.x], x_min_buffer[threadIdx.x + i]);
x_max_buffer[threadIdx.x] = fmaxf(x_max_buffer[threadIdx.x], x_max_buffer[threadIdx.x + i]);
y_min_buffer[threadIdx.x] = fminf(y_min_buffer[threadIdx.x], y_min_buffer[threadIdx.x + i]);
y_max_buffer[threadIdx.x] = fmaxf(y_max_buffer[threadIdx.x], y_max_buffer[threadIdx.x + i]);
z_min_buffer[threadIdx.x] = fminf(z_min_buffer[threadIdx.x], z_min_buffer[threadIdx.x + i]);
z_max_buffer[threadIdx.x] = fmaxf(z_max_buffer[threadIdx.x], z_max_buffer[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
// combining the results and generate the root cell
if (threadIdx.x == 0) {
while (atomicCAS(mutex, 0 ,1) != 0); // lock
*minX = fminf(*minX, x_min_buffer[0]);
*maxX = fmaxf(*maxX, x_max_buffer[0]);
*minY = fminf(*minY, y_min_buffer[0]);
*maxY = fmaxf(*maxY, y_max_buffer[0]);
*minZ = fminf(*minZ, z_min_buffer[0]);
*maxZ = fmaxf(*maxZ, z_max_buffer[0]);
atomicExch(mutex, 0); // unlock
}
}
// (currently) not needed: mass, count, start, child, index, (counter)
__global__ void particlesPerProcessKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m, SubDomainKeyTree *s,
int *procCounter, int *procCounterTemp, int curveType) {
//go over domain list (only the ones inherited by own process) and count particles (using count array)
//BUT: for now use this approach!
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long key;
int proc;
while ((bodyIndex + offset) < n) {
// calculate particle key from particle's position
key = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset],
minX, maxX, minY, maxY, minZ, maxZ, 21);
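// 21 levels * 3 bits per level = 63 bits of the space-filling-curve key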
// get corresponding process
proc = key2proc(key, s, curveType);
// increment corresponding counter
atomicAdd(&procCounter[proc], 1);
offset += stride;
}
}
// (currently) not needed: mass, count, start, child, index, (counter)
__global__ void markParticlesProcessKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m, SubDomainKeyTree *s,
int *procCounter, int *procCounterTemp, int *sortArray, int curveType) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long key;
int proc;
int counter;
while ((bodyIndex + offset) < n) {
// calculate particle key from particle's position
key = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset],
minX, maxX, minY, maxY, minZ, maxZ, 21);
// get corresponding process
proc = key2proc(key, s, curveType);
/*// increment corresponding counter
counter = atomicAdd(&procCounterTemp[proc], 1)
if (proc > 0) {
sortArray[bodyIndex + offset] = procCounter[proc-1] + counter;
}
else {
sortArray[bodyIndex + offset] = counter;
}*/
// mark particle with corresponding process
sortArray[bodyIndex + offset] = proc;
offset += stride;
}
}
__global__ void copyArrayKernel(float *targetArray, float *sourceArray, int n) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < n) {
targetArray[bodyIndex + offset] = sourceArray[bodyIndex + offset];
offset += stride;
}
}
//TODO: use template function
__global__ void resetFloatArrayKernel(float *array, float value, int n) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < n) {
array[bodyIndex + offset] = value;
offset += stride;
}
}
//TODO: deletable, but used as print-out/debug kernel
__global__ void debugKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m, SubDomainKeyTree *s, int *procCounter,
float *tempArray, int *sortArray, int *sortArrayOut) {
for (int i=0; i<8; i++) {
printf("child[%i] = %i\n", i, child[i]);
for (int k=0; k<8; k++) {
printf("\tchild[8*child[%i] + %i] = %i\n", i, k, child[8*child[i] + k]);
}
}
}
// Kernel 2: hierarchically subdivides the root cells
__global__ void buildTreeKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
//note: -1 used as "null pointer"
//note: -2 used to lock a child (pointer)
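// insertion strategy: each thread walks its body from the root towards a leaf,
// locks the child pointer it wants to modify via atomicCAS (writing -2) and, on
// a collision with an already stored body, keeps creating new cells until the
// two bodies fall into different octants; the lock is released by storing either
// the body index or the root of the newly built subtree in the child slot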
int offset;
bool newBody = true;
float min_x;
float max_x;
float min_y;
float max_y;
float min_z;
float max_z;
int childPath;
int temp;
int tempTemp;
offset = 0;
while ((bodyIndex + offset) < n) {
if (newBody) {
newBody = false;
// copy bounding box
min_x = *minX;
max_x = *maxX;
min_y = *minY;
max_y = *maxY;
min_z = *minZ;
max_z = *maxZ;
temp = 0;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) { // x direction
childPath += 1;
max_x = 0.5 * (min_x + max_x);
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) { // y direction
childPath += 2;
max_y = 0.5 * (min_y + max_y);
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) { // z direction
childPath += 4;
max_z = 0.5 * (min_z + max_z);
}
else {
min_z = 0.5 * (min_z + max_z);
}
}
int childIndex = child[temp*8 + childPath];
// traverse tree until hitting leaf node
while (childIndex >= m) { //n
tempTemp = temp;
temp = childIndex;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) { // x direction
childPath += 1;
max_x = 0.5 * (min_x + max_x);
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) { // y direction
childPath += 2;
max_y = 0.5 * (min_y + max_y);
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) { // z direction
childPath += 4;
max_z = 0.5 * (min_z + max_z);
}
else {
min_z = 0.5 * (min_z + max_z);
}
if (mass[bodyIndex + offset] != 0) {
atomicAdd(&x[temp], mass[bodyIndex + offset] * x[bodyIndex + offset]);
atomicAdd(&y[temp], mass[bodyIndex + offset] * y[bodyIndex + offset]);
atomicAdd(&z[temp], mass[bodyIndex + offset] * z[bodyIndex + offset]);
}
atomicAdd(&mass[temp], mass[bodyIndex + offset]);
atomicAdd(&count[temp], 1);
childIndex = child[8*temp + childPath];
}
// if child is not locked
if (childIndex != -2) {
int locked = temp * 8 + childPath;
if (atomicCAS(&child[locked], childIndex, -2) == childIndex) {
// check whether a body is already stored at the location
if (childIndex == -1) {
//insert body and release lock
child[locked] = bodyIndex + offset;
}
else {
if (childIndex >= n) {
printf("ATTENTION!\n");
}
int patch = 8 * m; //8*n
while (childIndex >= 0 && childIndex < n) { // was n
//create a new cell (by atomically requesting the next unused array index)
int cell = atomicAdd(index, 1);
patch = min(patch, cell);
if (patch != cell) {
child[8 * temp + childPath] = cell;
}
// insert old/original particle
childPath = 0;
if (x[childIndex] < 0.5 * (min_x + max_x)) { childPath += 1; }
if (y[childIndex] < 0.5 * (min_y + max_y)) { childPath += 2; }
if (z[childIndex] < 0.5 * (min_z + max_z)) { childPath += 4; }
x[cell] += mass[childIndex] * x[childIndex];
y[cell] += mass[childIndex] * y[childIndex];
z[cell] += mass[childIndex] * z[childIndex];
mass[cell] += mass[childIndex];
count[cell] += count[childIndex];
child[8 * cell + childPath] = childIndex;
start[cell] = -1;
// insert new particle
tempTemp = temp;
temp = cell;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) {
childPath += 1;
max_x = 0.5 * (min_x + max_x);
} else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) {
childPath += 2;
max_y = 0.5 * (min_y + max_y);
} else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) {
childPath += 4;
max_z = 0.5 * (min_z + max_z);
} else {
min_z = 0.5 * (min_z + max_z);
}
// COM / preparing for calculation of COM
if (mass[bodyIndex + offset] != 0) {
x[cell] += mass[bodyIndex + offset] * x[bodyIndex + offset];
y[cell] += mass[bodyIndex + offset] * y[bodyIndex + offset];
z[cell] += mass[bodyIndex + offset] * z[bodyIndex + offset];
mass[cell] += mass[bodyIndex + offset];
}
count[cell] += count[bodyIndex + offset];
childIndex = child[8 * temp + childPath];
}
child[8 * temp + childPath] = bodyIndex + offset;
__threadfence(); // written to global memory arrays (child, x, y, mass) thus need to fence
child[locked] = patch;
}
offset += stride;
newBody = true;
}
}
__syncthreads();
}
}
// (currently) not needed: start
// idea: assign already existing domain list nodes and add missing ones
__global__ void buildDomainTreeKernel(int *domainListIndex, unsigned long *domainListKeys, int *domainListLevels,
int *domainListIndices, float *x, float *y, float *z, float *mass, float *minX,
float *maxX, float *minY, float *maxY, float *minZ, float *maxZ, int *count,
int *start, int *child, int *index, int n, int m) {
int domainListCounter = 0;
//char keyAsChar[21 * 2 + 3];
int path[21];
float min_x, max_x, min_y, max_y, min_z, max_z;
int currentChild;
int childPath;
bool insert = true;
int childIndex;
int temp;
// loop over domain list indices (over the keys found/generated by createDomainListKernel)
for (int i = 0; i < *domainListIndex; i++) {
//key2Char(domainListKeys[i], 21, keyAsChar);
//printf("buildDomainTree: domainListLevels[%i] = %i\n", i, domainListLevels[i]);
//printf("domain: domainListKeys[%i] = %lu = %s (level: %i)\n", i, domainListKeys[i], keyAsChar, domainListLevels[i]);
childIndex = 0;
//temp = 0;
// iterate through levels (of corresponding domainListIndex)
for (int j = 0; j < domainListLevels[i]; j++) {
path[j] = (int) (domainListKeys[i] >> (21 * 3 - 3 * (j + 1)) & (int)7);
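// extract the octant (0..7) that the domain list key selects at level j+1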
temp = childIndex;
childIndex = child[8*childIndex + path[j]];
if (childIndex < n) {
if (childIndex == -1 /*&& childIndex < n*/) {
// no child at all here, thus add node
int cell = atomicAdd(index, 1);
child[8 * temp + path[j]] = cell;
childIndex = cell;
domainListIndices[domainListCounter] = childIndex; //cell;
domainListCounter++;
} else {
// child is a leaf, thus add node in between
int cell = atomicAdd(index, 1);
child[8 * /*childIndex*/temp + path[j]] = cell;
//printf("\tchild[8*%i + %i] = %i\n", temp, path[j], cell);
min_x = *minX;
max_x = *maxX;
min_y = *minY;
max_y = *maxY;
min_z = *minZ;
max_z = *maxZ;
for (int k=0; k<=j; k++) {
currentChild = path[k];
//printf("adding path[%i] = %i (j = %i)\n", k, path[k], j);
if (currentChild % 2 != 0) {
max_x = 0.5 * (min_x + max_x);
currentChild -= 1;
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (currentChild % 2 == 0 && currentChild % 4 != 0) {
max_y = 0.5 * (min_y + max_y);
currentChild -= 2;
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (currentChild == 4) {
max_z = 0.5 * (min_z + max_z);
currentChild -= 4;
}
else {
min_z = 0.5 * (min_z + max_z);
}
//printf("\t\t currentChild[%i] = %i %i\n", k, currentChild, path[k]);
}
// insert old/original particle
childPath = 0; //(int) (domainListKeys[i] >> (21 * 3 - 3 * ((j+1) + 1)) & (int)7); //0; //currentChild; //0;
if (x[childIndex] < 0.5 * (min_x + max_x)) {
childPath += 1;
//max_x = 0.5 * (min_x + max_x);
}
//else {
// min_x = 0.5 * (min_x + max_x);
//}
if (y[childIndex] < 0.5 * (min_y + max_y)) {
childPath += 2;
//max_y = 0.5 * (min_y + max_y);
}
//else {
// min_y = 0.5 * (min_y + max_y);
//}
if (z[childIndex] < 0.5 * (min_z + max_z)) {
childPath += 4;
//max_z = 0.5 * (min_z + max_z);
}
//else {
// min_z = 0.5 * (min_z + max_z);
//}
x[cell] += mass[childIndex] * x[childIndex];
y[cell] += mass[childIndex] * y[childIndex];
z[cell] += mass[childIndex] * z[childIndex];
mass[cell] += mass[childIndex];
//printf("path = %i\n", (int) (domainListKeys[i] >> (21 * 3 - 3 * ((j+1) + 1)) & (int)7));
//printf("j = %i, domainListLevels[%i] = %i\n", j, i, domainListLevels[i]);
printf("adding node in between for index %i cell = %i (childPath = %i, j = %i)! x = (%f, %f, %f)\n",
childIndex, cell, childPath, j, x[childIndex], y[childIndex], z[childIndex]);
//for (int l=0; l<=j; l++) {
// printf("\tpath[%i] = %i\n", l, path[l]);
//}
child[8 * cell + childPath] = childIndex;
//printf("child[8 * %i + %i] = %i\n", cell, childPath, childIndex);
childIndex = cell;
domainListIndices[domainListCounter] = childIndex; //temp;
domainListCounter++;
}
}
else {
insert = true;
// check whether node already marked as domain list node
for (int k=0; k<domainListCounter; k++) {
if (childIndex == domainListIndices[k]) {
insert = false;
break;
}
}
if (insert) {
// mark/save node as domain list node
domainListIndices[domainListCounter] = childIndex; //temp;
domainListCounter++;
}
}
}
}
//printf("domainListCounter = %i\n", domainListCounter);
}
// extract lowest domain list nodes from domain list nodes
// lowest domain list node = domain list node with children not being domain list nodes!
__global__ void lowestDomainListNodesKernel(int *domainListIndices, int *domainListIndex,
unsigned long *domainListKeys,
int *lowestDomainListIndices, int *lowestDomainListIndex,
unsigned long *lowestDomainListKeys,
float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int n, int m, int *procCounter) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
bool lowestDomainListNode;
int domainIndex;
int lowestDomainIndex;
int childIndex;
// check all domain list nodes
while ((bodyIndex + offset) < *domainListIndex) {
lowestDomainListNode = true;
// get domain list index of current domain list node
domainIndex = domainListIndices[bodyIndex + offset];
// check all children
for (int i=0; i<8; i++) {
childIndex = child[8 * domainIndex + i];
// check whether child exists
if (childIndex != -1) {
// check whether child is a node
if (childIndex >= n) {
// check if this node is a domain list node
for (int k=0; k<*domainListIndex; k++) {
if (childIndex == domainListIndices[k]) {
//printf("domainIndex = %i childIndex: %i domainListIndices: %i\n", domainIndex,
// childIndex, domainListIndices[k]);
lowestDomainListNode = false;
break;
}
}
// one child being a domain list node is sufficient for not being a lowest domain list node
if (!lowestDomainListNode) {
break;
}
}
}
}
if (lowestDomainListNode) {
// increment lowest domain list counter/index
lowestDomainIndex = atomicAdd(lowestDomainListIndex, 1);
// add/save index of lowest domain list node
lowestDomainListIndices[lowestDomainIndex] = domainIndex;
// add/save key of lowest domain list node
lowestDomainListKeys[lowestDomainIndex] = domainListKeys[bodyIndex + offset];
// debugging
//printf("Adding lowest domain list node #%i (key = %lu)\n", lowestDomainIndex,
// lowestDomainListKeys[lowestDomainIndex]);
}
offset += stride;
}
}
//for debugging purposes
__global__ void treeInfoKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m, int *procCounter, SubDomainKeyTree *s,
int *sortArray, int *sortArrayOut) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
unsigned long key;
int toCheck;
int toCheckSorted;
int proc;
int offset = 0;
// ---- check whether particles exist that do not belong to this process
/*while ((bodyIndex + offset) < procCounter[0]) {
key = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset], minX, maxX,
minY, maxY, minZ, maxZ, 21);
proc = key2proc(key, s);
if (proc != s->rank) {
printf("ATTENTION: myrank = %i and proc = %i (bodyIndex + offset = %i)\n", s->rank, proc,
bodyIndex + offset);
}
offset += stride;
}*/
// ----------------------------------------------------------------------
while ((bodyIndex + offset) < 8) {
printf("rank[%i] count[%i] = %i\n", s->rank, bodyIndex+offset, count[child[bodyIndex+offset]]);
offset += stride;
}
/*// ---- general information about particles ....
while ((bodyIndex + offset) < n) {
if ((bodyIndex + offset) % 100000 == 0) {
printf("particle[%i]: x = (%f, %f, %f) m = %f\n", bodyIndex+offset, x[bodyIndex+offset],
y[bodyIndex+offset], z[bodyIndex+offset], mass[bodyIndex+offset]);
}
offset += stride;
}
// ----------------------------------------------------------------------*/
}
__global__ void domainListInfoKernel(float *x, float *y, float *z, float *mass, int *child, int *index, int n,
int *domainListIndices, int *domainListIndex,
int *domainListLevels, int *lowestDomainListIndices,
int *lowestDomainListIndex, SubDomainKeyTree *s) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < *domainListIndex) {
/*printf("[rank %i] domainListIndices[%i] = %i x = (%f, %f, %f) m = %f\n", s->rank, bodyIndex + offset,
domainListIndices[bodyIndex + offset], x[domainListIndices[bodyIndex + offset]],
y[domainListIndices[bodyIndex + offset]], z[domainListIndices[bodyIndex + offset]],
mass[domainListIndices[bodyIndex + offset]]);*/
/*if (mass[domainListIndices[bodyIndex + offset]] == 0.f) {
for (int i=0; i<8; i++) {
printf("[rank %i] domainListIndices[%i] child[%i] = %i\n", s->rank, bodyIndex + offset, i,
child[8*domainListIndices[bodyIndex + offset] + i]);
}
}*/
offset += stride;
}
}
// convert key (unsigned long) to more readable level-wise (and separated) string/char-array
__device__ void key2Char(unsigned long key, int maxLevel, char *keyAsChar) {
int level[21];
for (int i=0; i<maxLevel; i++) {
level[i] = (int)(key >> (maxLevel*3 - 3*(i+1)) & (int)7);
}
    for (int i=0; i<maxLevel; i++) { // level[] only holds maxLevel entries, so do not read level[maxLevel]
        keyAsChar[2*i] = level[i] + '0';
        keyAsChar[2*i+1] = '|';
    }
    keyAsChar[2*maxLevel] = '\0'; // terminate within the (2*maxLevel + 3) sized buffers used by the callers
}
/*// table needed to convert from Lebesgue to Hilbert keys
__device__ const unsigned char DirTable[12][8] =
{ { 8,10, 3, 3, 4, 5, 4, 5}, { 2, 2,11, 9, 4, 5, 4, 5},
{ 7, 6, 7, 6, 8,10, 1, 1}, { 7, 6, 7, 6, 0, 0,11, 9},
{ 0, 8, 1,11, 6, 8, 6,11}, {10, 0, 9, 1,10, 7, 9, 7},
{10, 4, 9, 4,10, 2, 9, 3}, { 5, 8, 5,11, 2, 8, 3,11},
{ 4, 9, 0, 0, 7, 9, 2, 2}, { 1, 1, 8, 5, 3, 3, 8, 6},
{11, 5, 0, 0,11, 6, 2, 2}, { 1, 1, 4,10, 3, 3, 7,10} };
// table needed to convert from Lebesgue to Hilbert keys
__device__ const unsigned char HilbertTable[12][8] = { {0,7,3,4,1,6,2,5}, {4,3,7,0,5,2,6,1}, {6,1,5,2,7,0,4,3},
{2,5,1,6,3,4,0,7}, {0,1,7,6,3,2,4,5}, {6,7,1,0,5,4,2,3},
{2,3,5,4,1,0,6,7}, {4,5,3,2,7,6,0,1}, {0,3,1,2,7,4,6,5},
{2,1,3,0,5,6,4,7}, {4,7,5,6,3,0,2,1}, {6,5,7,4,1,2,0,3} };*/
// convert Lebesgue key to Hilbert key
__device__ unsigned long Lebesgue2Hilbert(unsigned long lebesgue, int maxLevel) {
unsigned long hilbert = 0UL;
int dir = 0;
for (int lvl=maxLevel; lvl>0; lvl--) {
unsigned long cell = (lebesgue >> ((lvl-1)*3)) & (unsigned long)((1<<3)-1);
hilbert = hilbert << 3;
if (lvl > 0) {
hilbert += HilbertTable[dir][cell];
}
dir = DirTable[dir][cell];
}
return hilbert;
}
// calculate particle key (Lebesgue) per particle based on position (resulting in an overdetermined key)
__device__ unsigned long getParticleKeyPerParticle(float x, float y, float z,
float *minX, float *maxX, float *minY,
float *maxY, float *minZ, float *maxZ,
int maxLevel) {
int level = 0;
unsigned long particleKey = 0UL;
int sonBox = 0;
float min_x = *minX;
float max_x = *maxX;
float min_y = *minY;
float max_y = *maxY;
float min_z = *minZ;
float max_z = *maxZ;
// calculate path to the particle's position assuming an octree with above bounding boxes
while (level <= maxLevel) {
sonBox = 0;
// find insertion point for body
if (x < 0.5 * (min_x+max_x)) {
sonBox += 1;
max_x = 0.5 * (min_x+max_x);
}
else { min_x = 0.5 * (min_x+max_x); }
if (y < 0.5 * (min_y+max_y)) {
sonBox += 2;
max_y = 0.5 * (min_y + max_y);
}
else { min_y = 0.5 * (min_y + max_y); }
if (z < 0.5 * (min_z+max_z)) {
sonBox += 4;
max_z = 0.5 * (min_z + max_z);
}
else { min_z = 0.5 * (min_z + max_z); }
particleKey = particleKey | ((unsigned long)sonBox << (unsigned long)(3 * (maxLevel-level-1)));
level ++;
}
//TODO: Hilbert change
return particleKey;
//return Lebesgue2Hilbert(particleKey, 21);
}
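// [Illustrative sketch, not part of the original code] How the per-level child indices (0..7) computed
// above compose into the Lebesgue key: level 0 occupies the most significant 3 bits (shift 3*(maxLevel-1)),
// level 1 the next 3 bits, and so on. The path values below are made-up example data; with maxLevel = 21
// the result is the key that key2Char() would render as "5|3|7|0|...".
__host__ __device__ inline unsigned long composeExampleKey() {
    const int maxLevel = 21;
    const int examplePath[3] = {5, 3, 7}; // hypothetical child indices for levels 0, 1 and 2
    unsigned long key = 0UL;
    for (int level = 0; level < 3; level++) {
        key |= (unsigned long)examplePath[level] << (unsigned long)(3 * (maxLevel - level - 1));
    }
    return key;
}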
// only for testing...
// calculating the key for all particles
__global__ void getParticleKeyKernel(float *x, float *y, float *z, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, unsigned long *key, int maxLevel, int n, SubDomainKeyTree *s) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long particleKey;
unsigned long hilbertParticleKey;
//char keyAsChar[21 * 2 + 3];
/*//debugging
if (bodyIndex == 0) {
char rangeAsChar[21 * 2 + 3];
for (int i=0; i<(s->numProcesses + 1); i++) {
key2Char(s->range[i], 21, rangeAsChar);
printf("range[%i] = %lu (%s)\n", i, s->range[i], rangeAsChar);
}
}
//end: debugging*/
while (bodyIndex + offset < n) {
particleKey = 0UL;
particleKey = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset],
minX, maxX, minY, maxY, minZ, maxZ, maxLevel);
//char keyAsChar[21 * 2 + 3];
hilbertParticleKey = Lebesgue2Hilbert(particleKey, 21);
key[bodyIndex + offset] = particleKey; //hilbertParticleKey;
//int proc = key2proc(particleKey, s);
//key2Char(testKey, 21, keyAsChar);
//key2Char(hilbertParticleKey, 21, keyAsChar);
//if ((bodyIndex + offset) % 5000 == 0) {
//printf("key[%i]: %lu\n", bodyIndex + offset, testKey);
//for (int proc=0; proc<=s->numProcesses; proc++) {
// printf("range[%i] = %lu\n", proc, s->range[proc]);
//}
//printf("key[%i]: %s = %lu (proc = %i)\n", bodyIndex + offset, keyAsChar, particleKey, proc);
//}
offset += stride;
}
}
// get the corresponding process of a key (using the range within the SubDomainKeyTree)
__device__ int key2proc(unsigned long k, SubDomainKeyTree *s, int curveType) {
if (curveType == 0) {
for (int proc=0; proc<s->numProcesses; proc++) {
if (k >= s->range[proc] && k < s->range[proc+1]) {
return proc;
}
}
}
else {
unsigned long hilbert = Lebesgue2Hilbert(k, 21);
for (int proc = 0; proc < s->numProcesses; proc++) {
if (hilbert >= s->range[proc] && hilbert < s->range[proc + 1]) {
return proc;
}
}
}
//printf("ERROR: key2proc(k=%lu): -1!", k);
return -1; // error
}
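// [Illustrative sketch, not part of the original code] Host-side analogue of key2proc() for the Lebesgue
// curve (curveType == 0), mainly to document the assumption behind the range array: it has to hold
// numProcesses+1 monotonically increasing keys (typically range[0] = 0 and range[numProcesses] = KEY_MAX),
// so that every key falls into exactly one half-open interval [range[proc], range[proc+1]).
// The function name and signature are made up for illustration and are not used anywhere in this file.
inline int key2procHost(unsigned long key, const unsigned long *range, int numProcesses) {
    for (int proc = 0; proc < numProcesses; proc++) {
        if (key >= range[proc] && key < range[proc + 1]) {
            return proc;
        }
    }
    return -1; // key outside all ranges; should not happen if the ranges cover the whole key space
}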
// Traversing the tree iteratively using an explicit stack
// not used (yet)
__global__ void traverseIterativeKernel(float *x, float *y, float *z, float *mass, int *child, int n, int m,
SubDomainKeyTree *s, int maxLevel) {
// starting traversing with the child[0, ..., 7] representing the first level of the tree
// explicit stack using shared memory
__shared__ int stack[128];
__shared__ int *stackPtr;
stackPtr = stack;
*stackPtr++ = NULL;
int childIndex;
int node;
int particleCounter = 0;
for (int j=0; j<8; j++) {
node = n;
stack[0] = child[j];
stackPtr = stack;
//counter = 0;
while (node != NULL /*&& counter < 200000*/) {
//counter++;
childIndex = *stackPtr;
for (int i=0; i<8; i++) {
if (child[8*childIndex + i] == -1) { /*do nothing*/ }
else {
if (child[8*childIndex + i] < n) {
particleCounter++;
}
else {
*stackPtr++ = child[8*childIndex + i]; //push
}
}
}
node = *--stackPtr; //pop
}
}
printf("Finished traversing iteratively! particleCounter = %i\n", particleCounter);
}
// get the domain list keys (and levels) resulting from ranges (within the SubDomainKeyTree)
// domain list nodes = common coarse tree for all processes
__global__ void createDomainListKernel(SubDomainKeyTree *s, int maxLevel, unsigned long *domainListKeys, int *levels,
int *index, int curveType) {
char keyAsChar[21 * 2 + 3];
// workaround for fixing bug... in principle: unsigned long keyMax = (1 << 63) - 1;
unsigned long shiftValue = 1;
unsigned long toShift = 63;
unsigned long keyMax = (shiftValue << toShift) - 1; // 1 << 63 not working!
//key2Char(keyMax, 21, keyAsChar); //printf("keyMax: %lu = %s\n", keyMax, keyAsChar);
unsigned long key2test = 0UL;
int level = 0;
level++;
// in principle: traversing a (non-existent) octree by walking the 1D spacefilling curve (keys of the tree nodes)
while (key2test < keyMax) {
if (isDomainListNode(key2test & (~0UL << (3 * (maxLevel - level + 1))), maxLevel, level-1, s, curveType)) {
// add domain list key
domainListKeys[*index] = key2test;
// add domain list level
levels[*index] = level;
*index += 1;
if (isDomainListNode(key2test, maxLevel, level, s, curveType)) {
level++;
}
else {
key2test = key2test + (1UL << 3 * (maxLevel - level));
}
} else {
level--;
// not necessary... 1 = 1
//key2test = keyMaxLevel(key2test & (~0UL << (3 * (maxLevel - level))), maxLevel, level, s) + 1 - (1UL << (3 * (maxLevel - level)));
}
}
//for (int i=0; i < *index; i++) {
// key2Char(domainListKeys[i], 21, keyAsChar);
//}
}
// check whether node is a domain list node
__device__ bool isDomainListNode(unsigned long key, int maxLevel, int level, SubDomainKeyTree *s, int curveType) {
int p1 = key2proc(key, s, curveType);
int p2 = key2proc(key | ~(~0UL << 3*(maxLevel-level)), s, curveType);
if (p1 != p2) {
return true;
}
else {
return false;
}
}
// get the maximal key of a key regarding a specific level
__device__ unsigned long keyMaxLevel(unsigned long key, int maxLevel, int level, SubDomainKeyTree *s) {
unsigned long keyMax = key | ~(~0UL << 3*(maxLevel-level));
return keyMax;
}
__global__ void prepareLowestDomainExchangeKernel(float *entry, float *mass, float *tempArray, int *lowestDomainListIndices,
int *lowestDomainListIndex, unsigned long *lowestDomainListKeys,
int *counter) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int index;
int lowestDomainIndex;
//copy x, y, z, mass of lowest domain list nodes into arrays
//sorting using cub (not here)
while ((bodyIndex + offset) < *lowestDomainListIndex) {
//if (bodyIndex + offset == 0) {
// printf("lowestDomainListIndex = %i\n", *lowestDomainListIndex);
//}
lowestDomainIndex = lowestDomainListIndices[bodyIndex + offset];
if (lowestDomainIndex >= 0) {
tempArray[bodyIndex+offset] = entry[lowestDomainIndex];
}
offset += stride;
}
//serial solution
/*for (int i=0; i<*lowestDomainListIndex; i++) {
tempArray[i] = entry[lowestDomainListIndices[i]];
}*/
}
//TODO: it is not necessary to calculate the moment (x_i * m), thus just use prepareLowestDomainExchangeKernel?
__global__ void prepareLowestDomainExchangeMassKernel(float *mass, float *tempArray, int *lowestDomainListIndices,
int *lowestDomainListIndex, unsigned long *lowestDomainListKeys,
int *counter) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int index;
int lowestDomainIndex;
//copy x, y, z, mass of lowest domain list nodes into arrays
//sorting using cub (not here)
while ((bodyIndex + offset) < *lowestDomainListIndex) {
lowestDomainIndex = lowestDomainListIndices[bodyIndex + offset];
if (lowestDomainIndex >= 0) {
tempArray[bodyIndex + offset] = mass[lowestDomainIndex];
printf("lowestDomainListIndex[%i]: mass = %f\n", bodyIndex+offset, tempArray[bodyIndex + offset]);
}
offset += stride;
}
}
//TODO: problem since not deterministic? keys are not unique
// at least the domain list nodes in general, but the lowest domain list nodes as well?
__global__ void updateLowestDomainListNodesKernel(float *tempArray, float *entry, int *lowestDomainListIndices,
int *lowestDomainListIndex, unsigned long *lowestDomainListKeys,
unsigned long *sortedLowestDomainListKeys, int *counter) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
    int originalIndex;
    while ((bodyIndex + offset) < *lowestDomainListIndex) {
        originalIndex = -1; // reset for every node, otherwise a stale index from a previous iteration could be reused
        for (int i=0; i<*lowestDomainListIndex; i++) {
            if (sortedLowestDomainListKeys[bodyIndex + offset] == lowestDomainListKeys[i]) {
                originalIndex = i;
                //break;
            }
        }
        if (originalIndex == -1) {
            printf("ATTENTION: originalIndex = -1 (key = %lu)!\n", sortedLowestDomainListKeys[bodyIndex + offset]);
        }
        else {
            entry[lowestDomainListIndices[originalIndex]] = tempArray[bodyIndex + offset];
        }
offset += stride;
}
}
__global__ void compLowestDomainListNodesKernel(float *x, float *y, float *z, float *mass, int *lowestDomainListIndices,
int *lowestDomainListIndex, unsigned long *lowestDomainListKeys,
unsigned long *sortedLowestDomainListKeys, int *counter) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int lowestDomainIndex;
while ((bodyIndex + offset) < *lowestDomainListIndex) {
lowestDomainIndex = lowestDomainListIndices[bodyIndex + offset];
if (mass[lowestDomainIndex] != 0) {
x[lowestDomainIndex] /= mass[lowestDomainIndex];
y[lowestDomainIndex] /= mass[lowestDomainIndex];
z[lowestDomainIndex] /= mass[lowestDomainIndex];
}
// debugging
//printf("lowestDomainIndex = %i x = (%f, %f, %f) m = %f (key: %lu)\n", lowestDomainIndex, x[lowestDomainIndex],
// y[lowestDomainIndex], z[lowestDomainIndex], mass[lowestDomainIndex], lowestDomainListKeys[bodyIndex + offset]);
offset += stride;
}
}
__global__ void zeroDomainListNodesKernel(int *domainListIndex, int *domainListIndices,
int *lowestDomainListIndex, int *lowestDomainListIndices,
float *x, float *y, float *z, float *mass) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int domainIndex;
bool zero;
while ((bodyIndex + offset) < *domainListIndex) {
zero = true;
domainIndex = domainListIndices[bodyIndex + offset];
        for (int i=0; i<*lowestDomainListIndex; i++) { // check all lowest domain list nodes (the previous bound skipped the last one)
            if (domainIndex == lowestDomainListIndices[i]) { // comparison, not assignment
zero = false;
}
}
if (zero) {
x[domainIndex] = 0.f;
y[domainIndex] = 0.f;
z[domainIndex] = 0.f;
mass[domainIndex] = 0.f;
}
offset += stride;
}
}
//TODO: lowest domain list nodes or domain list nodes?
__global__ void compLocalPseudoParticlesParKernel(float *x, float *y, float *z, float *mass, int *index, int n,
int *domainListIndices, int *domainListIndex,
int *lowestDomainListIndices, int *lowestDomainListIndex) {
//equivalent to centreOfMassKernel !?
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
bool domainList;
//note: most of it already done within buildTreeKernel
bodyIndex += n;
while (bodyIndex + offset < *index) {
domainList = false;
for (int i=0; i<*domainListIndex; i++) {
if ((bodyIndex + offset) == domainListIndices[i]) {
domainList = true; // hence do not insert
//for (int j=0; j<*lowestDomainListIndex; j++) {
// if ((bodyIndex + offset) == lowestDomainListIndices[j]) {
// domainList = false;
// break;
// }
//}
break;
}
}
if (mass[bodyIndex + offset] != 0 && !domainList) {
x[bodyIndex + offset] /= mass[bodyIndex + offset];
y[bodyIndex + offset] /= mass[bodyIndex + offset];
z[bodyIndex + offset] /= mass[bodyIndex + offset];
}
offset += stride;
}
}
//TODO: check functionality
__global__ void compDomainListPseudoParticlesParKernel(float *x, float *y, float *z, float *mass, int *child, int *index, int n,
int *domainListIndices, int *domainListIndex,
int *domainListLevels, int *lowestDomainListIndices,
int *lowestDomainListIndex) {
//calculate position (center of mass) and mass for domain list nodes
//Problem: start with "deepest" nodes
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset;
int domainIndex;
int level = 21; // max level
bool compute;
// go from max level to level=0
while (level >= 0) {
offset = 0;
compute = true;
while ((bodyIndex + offset) < *domainListIndex) {
compute = true;
domainIndex = domainListIndices[bodyIndex + offset];
for (int i=0; i<*lowestDomainListIndex; i++) {
if (domainIndex == lowestDomainListIndices[i]) {
compute = false;
}
}
if (compute && domainListLevels[bodyIndex + offset] == level) {
                // do the calculation
                for (int i=0; i<8; i++) {
                    int childToAdd = child[8*domainIndex + i];
                    if (childToAdd != -1) { // skip empty children (index -1 would be an out-of-bounds access)
                        x[domainIndex] += x[childToAdd] * mass[childToAdd];
                        y[domainIndex] += y[childToAdd] * mass[childToAdd];
                        z[domainIndex] += z[childToAdd] * mass[childToAdd];
                        mass[domainIndex] += mass[childToAdd];
                    }
                }
if (mass[domainIndex] != 0) {
x[domainIndex] /= mass[domainIndex];
y[domainIndex] /= mass[domainIndex];
z[domainIndex] /= mass[domainIndex];
}
// debugging
//printf("domain node: key = %lu x = (%f, %f, %f) m = %f\n", domainListIndices[bodyIndex + offset],
// x[domainIndex], y[domainIndex], z[domainIndex], mass[domainIndex]);
}
offset += stride;
}
__syncthreads();
level--;
}
}
// Kernel 3: computes the COM for each cell
__global__ void centreOfMassKernel(float *x, float *y, float *z, float *mass, int *index, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
//note: most of it already done within buildTreeKernel
bodyIndex += n;
while (bodyIndex + offset < *index) {
if (mass[bodyIndex + offset] == 0) {
printf("centreOfMassKernel: mass = 0 (%i)!\n", bodyIndex + offset);
}
        if (mass[bodyIndex + offset] != 0) {
x[bodyIndex + offset] /= mass[bodyIndex + offset];
y[bodyIndex + offset] /= mass[bodyIndex + offset];
z[bodyIndex + offset] /= mass[bodyIndex + offset];
}
offset += stride;
}
}
// Kernel 4: sorts the bodies
__global__ void sortKernel(int *count, int *start, int *sorted, int *child, int *index, int n, int m)
{
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
if (bodyIndex == 0) {
int sumParticles = 0;
for (int i=0; i<8; i++) {
sumParticles += count[child[i]];
}
printf("sumParticles = %i\n", sumParticles);
}
int s = 0;
if (threadIdx.x == 0) {
for (int i=0; i<8; i++){
int node = child[i];
// not a leaf node
if (node >= m) { //n
start[node] = s;
s += count[node];
}
// leaf node
else if (node >= 0) {
sorted[s] = node;
s++;
}
}
}
int cell = m + bodyIndex;
int ind = *index;
//int counter = 0; // for debugging purposes or rather to achieve the kernel to be finished
while ((cell + offset) < ind /*&& counter < 100000*/) {
//counter++;
//if (counter > 99998) {
//printf("cell + offset = %i\n", cell+offset);
//}
s = start[cell + offset];
if (s >= 0) {
for (int i=0; i<8; i++) {
int node = child[8*(cell+offset) + i];
// not a leaf node
if (node >= m) { //m
start[node] = s;
s += count[node];
}
// leaf node
else if (node >= 0) {
sorted[s] = node;
s++;
}
}
offset += stride;
}
}
}
// Kernel 5: computes the (gravitational) forces
__global__ void computeForcesKernel(float* x, float *y, float *z, float *vx, float *vy, float *vz,
float *ax, float *ay, float *az, float *mass,
int *sorted, int *child, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m,
float g, int blockSize, int warp, int stackSize, SubDomainKeyTree *s)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
//debug
unsigned long key;
//__shared__ float depth[stackSize * blockSize/warp];
// stack controlled by one thread per warp
//__shared__ int stack[stackSize * blockSize/warp];
extern __shared__ float buffer[];
float* depth = (float*)buffer;
    int* stack = (int*)&depth[stackSize * blockSize/warp]; // node indices are ints; avoids the float round-trip of the commented-out shared version above
float x_radius = 0.5*(*maxX - (*minX));
float y_radius = 0.5*(*maxY - (*minY));
float z_radius = 0.5*(*maxZ - (*minZ));
float radius_max = fmaxf(x_radius, y_radius);
float radius = fmaxf(radius_max, z_radius);
    // in case one of the first 8 children is a leaf
int jj = -1;
for (int i=0; i<8; i++) {
if (child[i] != -1) {
jj++;
}
}
int counter = threadIdx.x % warp;
int stackStartIndex = stackSize*(threadIdx.x / warp);
while ((bodyIndex + offset) < m) {
int sortedIndex = sorted[bodyIndex + offset];
float pos_x = x[sortedIndex];
float pos_y = y[sortedIndex];
float pos_z = z[sortedIndex];
float acc_x = 0.0;
float acc_y = 0.0;
float acc_z = 0.0;
// initialize stack
int top = jj + stackStartIndex;
if (counter == 0) {
int temp = 0;
for (int i=0; i<8; i++) {
// if child is not locked
if (child[i] != -1) {
stack[stackStartIndex + temp] = child[i];
depth[stackStartIndex + temp] = radius*radius/theta;
temp++;
}
}
}
__syncthreads();
// while stack is not empty / more nodes to visit
while (top >= stackStartIndex) {
int node = stack[top];
//debug
//if (node > n && node < m) {
// printf("PARALLEL FORCE! (node = %i x = (%f, %f, %f) m = %f)\n", node, x[node], y[node], z[node],
// mass[node]);
//}
//end: debug
float dp = 0.25*depth[top]; // float dp = depth[top];
for (int i=0; i<8; i++) {
int ch = child[8*node + i];
//__threadfence();
if (ch >= 0) {
float dx = x[ch] - pos_x;
float dy = y[ch] - pos_y;
float dz = z[ch] - pos_z;
float r = dx*dx + dy*dy + dz*dz + eps_squared;
//unsigned activeMask = __activemask();
//if (ch < n /*is leaf node*/ || !__any_sync(activeMask, dp > r)) {
if (ch < n /*is leaf node*/ || __all_sync(__activemask(), dp <= r)) {
/*//debug
key = getParticleKeyPerParticle(x[ch], y[ch], z[ch], minX, maxX, minY, maxY,
minZ, maxZ, 21);
if (key2proc(key, s) != s->rank) {
printf("Parallel force! child = %i x = (%f, %f, %f) mass = %f\n", ch, x[ch], y[ch], z[ch], mass[ch]);
}
//end: debug*/
// calculate interaction force contribution
r = rsqrt(r);
float f = mass[ch] * r * r * r;
acc_x += f*dx;
acc_y += f*dy;
acc_z += f*dz;
}
else {
// if first thread in warp: push node's children onto iteration stack
if (counter == 0) {
stack[top] = ch;
depth[top] = dp; // depth[top] = 0.25*dp;
}
top++; // descend to next tree level
//__threadfence();
}
}
else { /*top = max(stackStartIndex, top-1); */}
}
top--;
}
// update body data
ax[sortedIndex] = acc_x;
ay[sortedIndex] = acc_y;
az[sortedIndex] = acc_z;
offset += stride;
__syncthreads();
}
}
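// [Illustrative sketch, not part of the original code] A minimal host-side launch of computeForcesKernel,
// assuming the d_-prefixed pointers, numParticles and numNodes are already set up by the caller.
// The dynamic shared memory size follows from the buffer split above: one float in depth[] and one
// entry in stack[] per (stackSize * blockSize/warp) slot. Synchronization and error checking are omitted.
inline void launchComputeForces(float *d_x, float *d_y, float *d_z, float *d_vx, float *d_vy, float *d_vz,
                                float *d_ax, float *d_ay, float *d_az, float *d_mass, int *d_sorted,
                                int *d_child, float *d_minX, float *d_maxX, float *d_minY, float *d_maxY,
                                float *d_minZ, float *d_maxZ, int numParticles, int numNodes, float g,
                                SubDomainKeyTree *d_s) {
    int blockSize = 256;
    int warp = 32;
    int stackSize = 64;
    int gridSize = (numParticles + blockSize - 1) / blockSize;
    size_t sharedBytes = 2 * stackSize * (blockSize / warp) * sizeof(float); // depth[] + stack[]
    computeForcesKernel<<<gridSize, blockSize, sharedBytes>>>(d_x, d_y, d_z, d_vx, d_vy, d_vz,
                                                              d_ax, d_ay, d_az, d_mass, d_sorted, d_child,
                                                              d_minX, d_maxX, d_minY, d_maxY, d_minZ, d_maxZ,
                                                              numParticles, numNodes, g, blockSize, warp,
                                                              stackSize, d_s);
}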
// calculating the smallest distance between two nodes
__device__ float smallestDistance(float* x, float *y, float *z, int node1, int node2) {
float dx;
if (x[node1] < x[node2]) {
dx = x[node2] - x[node1];
}
else if (x[node1] > x[node2]) {
dx = x[node1] - x[node2];
}
else {
dx = 0.f;
}
float dy;
if (y[node1] < y[node2]) {
dy = y[node2] - y[node1];
}
else if (y[node1] > y[node2]) {
dy = y[node1] - y[node2];
}
else {
dy = 0.f;
}
float dz;
if (z[node1] < z[node2]) {
dz = z[node2] - z[node1];
}
else if (z[node1] > z[node2]) {
dz = z[node1] - z[node2];
}
else {
dz = 0.f;
}
return sqrtf(dx*dx + dy*dy + dz*dz);
}
//copy non-contiguous array elements into another array contiguously (in order to send them via MPI)
// e.g.: [5, 6, 3, 6, 6, 8] -> relevant indices = [1, 5] -> [6, 8]
__global__ void collectSendIndicesKernel(int *sendIndices, float *entry, float *tempArray, int *domainListCounter,
int sendCount) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int insertIndex;
while ((bodyIndex + offset) < sendCount) {
tempArray[bodyIndex + offset] = entry[sendIndices[bodyIndex + offset]];
offset += stride;
}
}
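// [Illustrative sketch, not part of the original code] Intended host-side use of collectSendIndicesKernel
// for the example above: with entry = [5, 6, 3, 6, 6, 8] and sendIndices = [1, 5] on the device, a launch
// with sendCount = 2 leaves tempArray = [6, 8], i.e. one contiguous buffer that can be handed to MPI.
// The helper name and the d_-prefixed parameters are hypothetical.
inline void collectSendEntries(int *d_sendIndices, float *d_entry, float *d_tempArray,
                               int *d_domainListCounter, int sendCount) {
    int blockSize = 256;
    int gridSize = (sendCount + blockSize - 1) / blockSize;
    collectSendIndicesKernel<<<gridSize, blockSize>>>(d_sendIndices, d_entry, d_tempArray,
                                                      d_domainListCounter, sendCount);
}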
//ATTENTION: causes duplicate entries, which need to be removed afterwards
__global__ void symbolicForceKernel(int relevantIndex, float *x, float *y, float *z, float *mass, float *minX, float *maxX, float *minY,
float *maxY, float *minZ, float *maxZ, int *child, int *domainListIndex,
unsigned long *domainListKeys, int *domainListIndices, int *domainListLevels,
int *domainListCounter, int *sendIndices, int *index, int *particleCounter,
SubDomainKeyTree *s, int n, int m, float diam, float theta_, int *mutex,
int *relevantDomainListIndices) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
float r;
int insertIndex;
bool insert;
int level;
int childIndex;
//bool redo = false;
while ((bodyIndex + offset) < *index) {
//if ((bodyIndex + offset) == 0) {
// printf("relevantIndex: %i\n", relevantDomainListIndices[relevantIndex]);
//}
insert = true;
//redo = false;
for (int i=0; i<*domainListIndex; i++) {
if ((bodyIndex + offset) == domainListIndices[i]) {
insert = false;
break;
}
}
//if (mass[relevantDomainListIndices[relevantIndex]] == 0) {
// insert = false;
//}
// TODO: CHANGED: relevantIndex -> relevantDomainListIndices[relevantIndex]
if (insert && (bodyIndex + offset) != relevantDomainListIndices[relevantIndex] && ((bodyIndex + offset) < particleCounter[s->rank] || (bodyIndex + offset) > n)) {
//r = smallestDistance(x, y, z, bodyIndex + offset, relevantDomainListIndices[relevantIndex]); //relevantIndex, bodyIndex + offset);
r = smallestDistance(x, y, z, relevantDomainListIndices[relevantIndex], bodyIndex + offset);
//calculate tree level by determining the particle's key and traversing the tree until hitting that particle
level = getTreeLevel(bodyIndex + offset, child, x, y, z, minX, maxX, minY, maxY, minZ, maxZ);
if ((powf(0.5, level) * diam) >= (theta_ * r) && level >= 0) {
//TODO: insert cell itself or children?
/// inserting cell itself
/*//check whether node is a domain list node
for (int i=0; i<*domainListIndex; i++) {
if ((bodyIndex + offset) == domainListIndices[i]) {
insert = false;
break;
//printf("domain list nodes do not need to be sent!\n");
}
}
if (insert) {
//add to indices to be sent
insertIndex = atomicAdd(domainListCounter, 1);
sendIndices[insertIndex] = bodyIndex + offset;
}
else {
}*/
/// inserting children
for (int i=0; i<8; i++) {
childIndex = child[8*(bodyIndex + offset) + i];
//check whether node is already within the indices to be sent
//check whether node is a domain list node
                for (int k = 0; k < *domainListIndex; k++) { // separate counter to avoid shadowing the outer child loop variable
                    if (childIndex == domainListIndices[k]) {
insert = false;
//printf("domain list nodes do not need to be sent!\n");
}
}
if (insert && childIndex != -1) {
//add to indices to be sent
insertIndex = atomicAdd(domainListCounter, 1);
sendIndices[insertIndex] = childIndex;
}
}
}
}
else {
//no particle to examine...
}
offset += stride;
}
}
//reset domainListCounter after compTheta!
__global__ void compThetaKernel(float *x, float *y, float *z, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int *domainListIndex, int *domainListCounter,
unsigned long *domainListKeys, int *domainListIndices, int *domainListLevels,
int *relevantDomainListIndices, SubDomainKeyTree *s, int curveType) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int bodyIndex = 0;
unsigned long key;
int domainIndex;
//"loop" over domain list nodes
while ((index + offset) < *domainListIndex) {
bodyIndex = domainListIndices[index + offset];
//calculate key
key = getParticleKeyPerParticle(x[bodyIndex], y[bodyIndex], z[bodyIndex], minX, maxX, minY, maxY,
minZ, maxZ, 21);
//if domain list node belongs to other process: add to relevant domain list indices
if (key2proc(key, s, curveType) != s->rank) {
domainIndex = atomicAdd(domainListCounter, 1);
relevantDomainListIndices[domainIndex] = bodyIndex;
//printf("relevant domain list index: %i\n", bodyIndex);
}
offset += stride;
}
}
// Kernel 6: updates the bodies/particles
__global__ void updateKernel(float *x, float *y, float *z, float *vx, float *vy, float *vz,
float *ax, float *ay, float *az, int n, float dt, float d) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while (bodyIndex + offset < n) {
// calculating/updating the velocities
vx[bodyIndex + offset] += dt * ax[bodyIndex + offset];
vy[bodyIndex + offset] += dt * ay[bodyIndex + offset];
vz[bodyIndex + offset] += dt * az[bodyIndex + offset];
// calculating/updating the positions
x[bodyIndex + offset] += d * dt * vx[bodyIndex + offset];
y[bodyIndex + offset] += d * dt * vy[bodyIndex + offset];
z[bodyIndex + offset] += d * dt * vz[bodyIndex + offset];
offset += stride;
}
}
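// [Illustrative sketch, not part of the original code] Typical host-side use of updateKernel as the last
// stage of one simulation step (after the force computation), assuming a time step dt and a position
// scaling factor d of 1.0; the d_-prefixed names are hypothetical and synchronization is omitted.
inline void launchUpdate(float *d_x, float *d_y, float *d_z, float *d_vx, float *d_vy, float *d_vz,
                         float *d_ax, float *d_ay, float *d_az, int numParticles, float dt) {
    int blockSize = 256;
    int gridSize = (numParticles + blockSize - 1) / blockSize;
    updateKernel<<<gridSize, blockSize>>>(d_x, d_y, d_z, d_vx, d_vy, d_vz,
                                          d_ax, d_ay, d_az, numParticles, dt, 1.0f);
}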
//TODO: only update/calculate COM for non-domain-list nodes?!
__global__ void insertReceivedParticlesKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int *to_delete_leaf, int *domainListIndices,
int *domainListIndex, int *lowestDomainListIndices, int *lowestDomainListIndex,
int n, int m) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
//note: -1 used as "null pointer"
//note: -2 used to lock a child (pointer)
int offset;
bool newBody = true;
float min_x;
float max_x;
float min_y;
float max_y;
float min_z;
float max_z;
int childPath;
int temp;
bool isDomainList = false;
offset = 0;
bodyIndex += to_delete_leaf[0];
//if ((bodyIndex + offset) % 10000 == 0) {
// printf("index = %i x = (%f, %f, %f)\n", bodyIndex + offset, x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset]);
//}
while ((bodyIndex + offset) < to_delete_leaf[1] && (bodyIndex + offset) > to_delete_leaf[0]) {
//if ((bodyIndex + offset) % 100 == 0) {
//if (mass[bodyIndex+offset] > 200.f) {
// printf("insert particle %i: x = (%f, %f, %f) m = %f\n", bodyIndex+offset, x[bodyIndex+offset],
// y[bodyIndex+offset], z[bodyIndex+offset], mass[bodyIndex+offset]);
//}
/*//debugging
if ((bodyIndex + offset) % 100 == 0) {
printf("index = %i x = (%f, %f, %f) (index = %i) to_delete_leaf = (%i, %i)\n", bodyIndex + offset, x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset], *index, to_delete_leaf[0], to_delete_leaf[1]);
//printf("index = %i x = (%f, %f, %f) (index = %i) to_delete_leaf = (%i, %i)\n", bodyIndex + offset - 10000, x[bodyIndex + offset - 10000], y[bodyIndex + offset-10000], z[bodyIndex + offset-10000], *index, to_delete_leaf[0], to_delete_leaf[1]);
}
for (int i=to_delete_leaf[0]; i<to_delete_leaf[1]; i++) {
if (i != (bodyIndex + offset)) {
if (x[i] == x[bodyIndex + offset]) {
//printf("ATTENTION: x[%i] = (%f, %f, %f) vs. x[%i] = (%f, %f, %f)\n", i, x[i], y[i], z[i],
// bodyIndex + offset, x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset]);
}
}
}
//end: debugging*/
//debugging
//offset += stride;
if (newBody) {
newBody = false;
isDomainList = false;
min_x = *minX;
max_x = *maxX;
min_y = *minY;
max_y = *maxY;
min_z = *minZ;
max_z = *maxZ;
temp = 0;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) { // x direction
childPath += 1;
max_x = 0.5 * (min_x + max_x);
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) { // y direction
childPath += 2;
max_y = 0.5 * (min_y + max_y);
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) { // z direction
childPath += 4;
max_z = 0.5 * (min_z + max_z);
}
else {
min_z = 0.5 * (min_z + max_z);
}
}
int childIndex = child[temp*8 + childPath];
// traverse tree until hitting leaf node
while (childIndex >= m /*&& childIndex < (8*m)*/) { //formerly n
isDomainList = false;
temp = childIndex;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) { // x direction
childPath += 1;
max_x = 0.5 * (min_x + max_x);
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) { // y direction
childPath += 2;
max_y = 0.5 * (min_y + max_y);
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) { // z direction
childPath += 4;
max_z = 0.5 * (min_z + max_z);
}
else {
min_z = 0.5 * (min_z + max_z);
}
for (int i=0; i<*domainListIndex; i++) {
if (temp == domainListIndices[i]) {
isDomainList = true;
break;
}
}
//TODO: !!!
if (/*true*/ !isDomainList) {
if (mass[bodyIndex + offset] != 0) {
atomicAdd(&x[temp], mass[bodyIndex + offset] * x[bodyIndex + offset]);
atomicAdd(&y[temp], mass[bodyIndex + offset] * y[bodyIndex + offset]);
atomicAdd(&z[temp], mass[bodyIndex + offset] * z[bodyIndex + offset]);
}
atomicAdd(&mass[temp], mass[bodyIndex + offset]);
//atomicAdd(&count[temp], 1); // do not count, since particles are just temporarily saved on this process
}
                atomicAdd(&count[temp], 1); // counts also the received particles, although they are only temporarily inserted on this process
childIndex = child[8*temp + childPath];
}
// if child is not locked
if (childIndex != -2) {
int locked = temp * 8 + childPath;
//lock
if (atomicCAS(&child[locked], childIndex, -2) == childIndex) {
// check whether a body is already stored at the location
if (childIndex == -1) {
//insert body and release lock
child[locked] = bodyIndex + offset;
}
else {
int patch = 8 * m; //8*n
while (childIndex >= 0 && childIndex < n) {
//debug
if (x[childIndex] == x[bodyIndex + offset]) {
printf("ATTENTION (shouldn't happen...): x[%i] = (%f, %f, %f) vs. x[%i] = (%f, %f, %f) | to_delete_leaf = (%i, %i)\n",
childIndex, x[childIndex], y[childIndex], z[childIndex], bodyIndex + offset, x[bodyIndex + offset],
y[bodyIndex + offset], z[bodyIndex + offset], to_delete_leaf[0], to_delete_leaf[1]);
}
//create a new cell (by atomically requesting the next unused array index)
int cell = atomicAdd(index, 1);
patch = min(patch, cell);
if (patch != cell) {
child[8 * temp + childPath] = cell;
}
// insert old/original particle
childPath = 0;
if (x[childIndex] < 0.5 * (min_x + max_x)) { childPath += 1; }
if (y[childIndex] < 0.5 * (min_y + max_y)) { childPath += 2; }
if (z[childIndex] < 0.5 * (min_z + max_z)) { childPath += 4; }
x[cell] += mass[childIndex] * x[childIndex];
y[cell] += mass[childIndex] * y[childIndex];
z[cell] += mass[childIndex] * z[childIndex];
mass[cell] += mass[childIndex];
                        // counts also the received particles, although they are only temporarily inserted on this process
count[cell] += count[childIndex];
child[8 * cell + childPath] = childIndex;
start[cell] = -1; //TODO: resetting start needed in insertReceivedParticles()?
// insert new particle
temp = cell;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) {
childPath += 1;
max_x = 0.5 * (min_x + max_x);
} else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) {
childPath += 2;
max_y = 0.5 * (min_y + max_y);
} else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) {
childPath += 4;
max_z = 0.5 * (min_z + max_z);
} else {
min_z = 0.5 * (min_z + max_z);
}
// COM / preparing for calculation of COM
if (mass[bodyIndex + offset] != 0) {
x[cell] += mass[bodyIndex + offset] * x[bodyIndex + offset];
y[cell] += mass[bodyIndex + offset] * y[bodyIndex + offset];
z[cell] += mass[bodyIndex + offset] * z[bodyIndex + offset];
mass[cell] += mass[bodyIndex + offset];
}
                        // counts also the received particles, although they are only temporarily inserted on this process
count[cell] += count[bodyIndex + offset];
childIndex = child[8 * temp + childPath];
}
child[8 * temp + childPath] = bodyIndex + offset;
__threadfence(); // written to global memory arrays (child, x, y, mass) thus need to fence
child[locked] = patch;
}
offset += stride;
newBody = true;
}
else {
}
}
else {
}
__syncthreads();
}
}
__global__ void centreOfMassReceivedParticlesKernel(float *x, float *y, float *z, float *mass, int *startIndex, int *endIndex, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
//note: most of it already done within buildTreeKernel
bodyIndex += *startIndex;
while ((bodyIndex + offset) < *endIndex) {
if (mass[bodyIndex + offset] == 0) {
printf("centreOfMassKernel: mass = 0 (%i)!\n", bodyIndex + offset);
}
        if (mass[bodyIndex + offset] != 0) {
x[bodyIndex + offset] /= mass[bodyIndex + offset];
y[bodyIndex + offset] /= mass[bodyIndex + offset];
z[bodyIndex + offset] /= mass[bodyIndex + offset];
}
offset += stride;
}
}
// probably not needed, since tree is built (newly) for every iteration (step)
__global__ void repairTreeKernel(float *x, float *y, float *z, float *vx, float *vy, float *vz,
float *ax, float *ay, float *az, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int *to_delete_cell, int *to_delete_leaf,
int *domainListIndices, int n, int m) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
//delete inserted leaves
while ((bodyIndex + offset) >= to_delete_leaf[0] && (bodyIndex + offset) < to_delete_leaf[1]) {
for (int i=0; i<8; i++) {
child[(bodyIndex + offset)*8 + i] = -1;
}
count[bodyIndex + offset] = 1;
x[bodyIndex + offset] = 0;
y[bodyIndex + offset] = 0;
z[bodyIndex + offset] = 0;
vx[bodyIndex + offset] = 0;
vy[bodyIndex + offset] = 0;
vz[bodyIndex + offset] = 0;
ax[bodyIndex + offset] = 0;
ay[bodyIndex + offset] = 0;
az[bodyIndex + offset] = 0;
mass[bodyIndex + offset] = 0;
start[bodyIndex + offset] = -1;
//sorted[bodyIndex + offset] = 0;
offset += stride;
}
offset = 0;
//delete inserted cells
while ((bodyIndex + offset) >= to_delete_cell[0] && (bodyIndex + offset) < to_delete_cell[1]) {
for (int i=0; i<8; i++) {
child[(bodyIndex + offset)*8 + i] = -1;
}
count[bodyIndex + offset] = 0;
x[bodyIndex + offset] = 0;
y[bodyIndex + offset] = 0;
z[bodyIndex + offset] = 0;
vx[bodyIndex + offset] = 0;
vy[bodyIndex + offset] = 0;
vz[bodyIndex + offset] = 0;
ax[bodyIndex + offset] = 0;
ay[bodyIndex + offset] = 0;
az[bodyIndex + offset] = 0;
mass[bodyIndex + offset] = 0;
start[bodyIndex + offset] = -1;
//sorted[bodyIndex + offset] = 0;
offset += stride;
}
}
__device__ int getTreeLevel(int index, int *child, float *x, float *y, float *z, float *minX, float *maxX, float *minY,
float *maxY, float *minZ, float *maxZ) {
unsigned long key = getParticleKeyPerParticle(x[index], y[index], z[index], minX, maxX, minY, maxY, minZ, maxZ, 21);
//int proc = key2proc(key, s);
int level = 0; //TODO: initialize level with 0 or 1 for getTreeLevel()?
int childIndex;
int path[21];
for (int i=0; i<21; i++) {
path[i] = (int) (key >> (21*3 - 3 * (i + 1)) & (int)7);
}
childIndex = 0;//child[path[0]];
//TODO: where to put level++ for getTreeLevel()?
for (int i=0; i<21; i++) {
//level++;
//childIndex = child[8*childIndex + path[i]];
if (childIndex == index) {
return level;
}
childIndex = child[8*childIndex + path[i]];
level++;
//childIndex = child[8*childIndex + path[i]];
//level++;
}
childIndex = 0; //child[path[0]];
printf("ATTENTION: level = -1 (index = %i x = (%f, %f, %f))\n", index, x[index], y[index], z[index]);
//printf("\tlevel = -1 childIndex = %i path[%i] = %i\n", childIndex, 0, path[0]);
/*for (int i=0; i<21; i++) {
childIndex = child[8*childIndex + path[i]];
printf("\tlevel = -1 childIndex = %i path[%i] = %i\n", childIndex, i, path[i]);
//for (int ii=0; ii<21; ii++) {
// printf("\t\t child[8*childIndex + %i] = %i\n", ii, child[8*childIndex + ii]);
//}
}*/
return -1;
}
// for debugging purposes
__global__ void findDuplicatesKernel(float *array, float *array_2, int length, SubDomainKeyTree *s, int *duplicateCounter) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < length) {
for (int i=0; i<length; i++) {
if (i != (bodyIndex + offset)) {
if (array[bodyIndex + offset] == array[i] && array_2[bodyIndex + offset] == array_2[i]) {
duplicateCounter[i] += 1;
printf("duplicate! (%i vs. %i) (x = %f, y = %f)\n", i, bodyIndex + offset, array[i], array_2[i]);
}
}
}
offset += stride;
}
}
// mark duplicates within an array (with -1)
__global__ void markDuplicatesKernel(int *indices, float *x, float *y, float *z,
float *mass, SubDomainKeyTree *s, int *counter, int length) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int maxIndex;
//remark: check only x, but in principle check all
while ((bodyIndex + offset) < length) {
if (indices[bodyIndex + offset] != -1) {
for (int i = 0; i < length; i++) {
if (i != (bodyIndex + offset)) {
if (indices[i] != -1 && (x[indices[bodyIndex + offset]] == x[indices[i]] || indices[bodyIndex + offset] == indices[i])) {
maxIndex = max(bodyIndex + offset, i);
// mark larger index with -1 (thus a duplicate)
indices[maxIndex] = -1;
atomicAdd(counter, 1);
}
}
}
}
//__syncthreads();
offset += stride;
}
}
// remove previously marked duplicates or rather copy non-duplicates into another array
__global__ void removeDuplicatesKernel(int *indices, int *removedDuplicatesIndices, int *counter, int length) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int indexToInsert;
while ((bodyIndex + offset) < length) {
if (indices[bodyIndex + offset] != -1) {
indexToInsert = atomicAdd(counter, 1);
removedDuplicatesIndices[indexToInsert] = indices[bodyIndex + offset];
}
offset += stride;
}
}
__global__ void getParticleCount(int *child, int *count, int *particleCount) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < 8) {
//particleCount += count[child[bodyIndex + offset]];
atomicAdd(particleCount, count[child[bodyIndex + offset]]);
offset += stride;
}
}
__global__ void createKeyHistRangesKernel(int bins, unsigned long *keyHistRanges) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long max_key = 1UL << 63;
while ((bodyIndex + offset) < bins) {
keyHistRanges[bodyIndex + offset] = (bodyIndex + offset) * (max_key/bins);
//printf("keyHistRanges[%i] = %lu\n", bodyIndex + offset, keyHistRanges[bodyIndex + offset]);
if ((bodyIndex + offset) == (bins - 1)) {
keyHistRanges[bins-1] = KEY_MAX;
}
offset += stride;
}
}
__global__ void keyHistCounterKernel(unsigned long *keyHistRanges, int *keyHistCounts, int bins, int n,
float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, SubDomainKeyTree *s, int curveType) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long key;
while ((bodyIndex + offset) < n) {
key = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset],
minX, maxX, minY, maxY, minZ, maxZ, 21);
if (curveType == 0) {
for (int i=0; i<(bins); i++) {
if (key >= keyHistRanges[i] && key < keyHistRanges[i+1]) {
//keyHistCounts[i] += 1;
atomicAdd(&keyHistCounts[i], 1);
break;
}
}
}
else {
//TODO: Hilbert change
unsigned long hilbert = Lebesgue2Hilbert(key, 21);
for (int i = 0; i < (bins); i++) {
if (hilbert >= keyHistRanges[i] && hilbert < keyHistRanges[i + 1]) {
//keyHistCounts[i] += 1;
atomicAdd(&keyHistCounts[i], 1);
break;
}
}
}
offset += stride;
}
}
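// [Illustrative sketch, not part of the original code] How the histogram kernels fit together for load
// balancing, assuming the kernels are declared in the project header and that the d_-prefixed pointers,
// bins and numParticles are provided by the caller; the MPI all-reduce of the bin counts between the two
// launches is only indicated as a comment, and calculateNewRangeKernel is defined further below.
inline void updateRangesSketch(unsigned long *d_keyHistRanges, int *d_keyHistCounts, int bins,
                               float *d_x, float *d_y, float *d_z, float *d_mass, int *d_count, int *d_start,
                               int *d_child, int *d_index, float *d_minX, float *d_maxX, float *d_minY,
                               float *d_maxY, float *d_minZ, float *d_maxZ, SubDomainKeyTree *d_s,
                               int numParticles, int curveType) {
    int blockSize = 256;
    int gridSize = (bins + blockSize - 1) / blockSize;
    // 1. split the key space into `bins` equally sized candidate ranges
    createKeyHistRangesKernel<<<gridSize, blockSize>>>(bins, d_keyHistRanges);
    // 2. count how many local particles fall into each candidate range
    gridSize = (numParticles + blockSize - 1) / blockSize;
    keyHistCounterKernel<<<gridSize, blockSize>>>(d_keyHistRanges, d_keyHistCounts, bins, numParticles,
                                                  d_x, d_y, d_z, d_mass, d_count, d_start, d_child, d_index,
                                                  d_minX, d_maxX, d_minY, d_maxY, d_minZ, d_maxZ,
                                                  d_s, curveType);
    // (an MPI all-reduce of d_keyHistCounts would go here so every process sees the global bin counts)
    // 3. choose new range boundaries such that each process receives roughly numParticles bodies
    gridSize = (bins + blockSize - 1) / blockSize;
    calculateNewRangeKernel<<<gridSize, blockSize>>>(d_keyHistRanges, d_keyHistCounts, bins, numParticles,
                                                     d_x, d_y, d_z, d_mass, d_count, d_start, d_child, d_index,
                                                     d_minX, d_maxX, d_minY, d_maxY, d_minZ, d_maxZ, d_s);
}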
//TODO: rename index
__global__ void calculateNewRangeKernel(unsigned long *keyHistRanges, int *keyHistCounts, int bins, int n,
float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, SubDomainKeyTree *s) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int sum;
unsigned long newRange;
while ((bodyIndex + offset) < (bins-1)) {
sum = 0;
for (int i=0; i<(bodyIndex+offset); i++) {
sum += keyHistCounts[i];
}
for (int i=1; i<s->numProcesses; i++) {
if ((sum + keyHistCounts[bodyIndex + offset]) >= (i*n) && sum < (i*n)) {
printf("[rank %i] new range: %lu\n", s->rank, keyHistRanges[bodyIndex + offset]);
s->range[i] = keyHistRanges[bodyIndex + offset];
}
}
//printf("[rank %i] keyHistCounts[%i] = %i\n", s->rank, bodyIndex+offset, keyHistCounts[bodyIndex+offset]);
atomicAdd(index, keyHistCounts[bodyIndex+offset]);
offset += stride;
}
} | 448f421c6dae993f8c8031840f571ef3109e6bd1.cu | /**
* CUDA Kernel functions.
*
* Notes:
*
* * use `-1` as *null pointer*
 *  * when inserting a body, a thread descends to the corresponding last-level cell and then attempts to lock the
 *    appropriate child pointer (an array index) by writing an otherwise unused value (-2) to it using an atomic operation
*/
#include "../include/Kernels.cuh"
//__device__ const int blockSize = 256; //256;
//extern __shared__ float buffer[];
//__device__ const int warp = 32;
//__device__ const int stackSize = 64;
__device__ const float eps_squared = 0.0025;
__device__ const float theta = 1.5; //0.5; //1.5; //0.5;
__global__ void resetArraysKernel(int *mutex, float *x, float *y, float *z, float *mass, int *count, int *start,
int *sorted, int *child, int *index, float *minX, float *maxX,
float *minY, float *maxY, float *minZ, float *maxZ, int n, int m,
int *procCounter, int *procCounterTemp) {
int bodyIndex = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
    // reset octree arrays
while(bodyIndex + offset < m) {
#pragma unroll 8
for (int i=0; i<8; i++) {
child[(bodyIndex + offset)*8 + i] = -1;
}
if (bodyIndex + offset < n) {
count[bodyIndex + offset] = 1;
}
else {
x[bodyIndex + offset] = 0;
y[bodyIndex + offset] = 0;
z[bodyIndex + offset] = 0;
mass[bodyIndex + offset] = 0;
count[bodyIndex + offset] = 0;
}
start[bodyIndex + offset] = -1;
sorted[bodyIndex + offset] = 0;
offset += stride;
}
    // reset octree pointers
if (bodyIndex == 0) {
*mutex = 0;
*index = n;
*minX = 0;
*maxX = 0;
*minY = 0;
*maxY = 0;
*minZ = 0;
*maxZ = 0;
procCounter[0] = 0;
procCounter[1] = 0;
procCounterTemp[0] = 0;
procCounterTemp[1] = 0;
}
}
__global__ void resetArraysParallelKernel(int *domainListIndex, unsigned long *domainListKeys,
int *domainListIndices, int *domainListLevels,
int *lowestDomainListIndices, int *lowestDomainListIndex,
unsigned long *lowestDomainListKeys, unsigned long *sortedLowestDomainListKeys,
float *tempArray, int *to_delete_cell, int *to_delete_leaf, int n, int m) {
int bodyIndex = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < n) {
tempArray[bodyIndex + offset] = 0;
if ((bodyIndex + offset) < DOMAIN_LIST_SIZE) {
domainListLevels[bodyIndex + offset] = -1;
domainListKeys[bodyIndex + offset] = KEY_MAX;
domainListIndices[bodyIndex + offset] = -1;
lowestDomainListIndices[bodyIndex + offset] = -1;
lowestDomainListKeys[bodyIndex + offset] = KEY_MAX;
sortedLowestDomainListKeys[bodyIndex + offset] = KEY_MAX;
offset += stride;
}
offset += stride;
}
if (bodyIndex == 0) {
*domainListIndex = 0;
*lowestDomainListIndex = 0;
to_delete_cell[0] = -1;
to_delete_cell[1] = -1;
to_delete_leaf[0] = -1;
to_delete_leaf[1] = -1;
}
}
// Kernel 1: computes bounding box around all bodies
__global__ void computeBoundingBoxKernel(int *mutex, float *x, float *y, float *z, float *minX, float *maxX,
float *minY, float *maxY, float *minZ, float *maxZ, int n, int blockSize)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
// initialize local min/max
float x_min = x[index];
float x_max = x[index];
float y_min = y[index];
float y_max = y[index];
float z_min = z[index];
float z_max = z[index];
extern __shared__ float buffer[];
float* x_min_buffer = (float*)buffer;
float* x_max_buffer = (float*)&x_min_buffer[blockSize];
float* y_min_buffer = (float*)&x_max_buffer[blockSize];
float* y_max_buffer = (float*)&y_min_buffer[blockSize];
float* z_min_buffer = (float*)&y_max_buffer[blockSize];
float* z_max_buffer = (float*)&z_min_buffer[blockSize];
int offset = stride;
// find (local) min/max
while (index + offset < n) {
x_min = fminf(x_min, x[index + offset]);
x_max = fmaxf(x_max, x[index + offset]);
y_min = fminf(y_min, y[index + offset]);
y_max = fmaxf(y_max, y[index + offset]);
z_min = fminf(z_min, z[index + offset]);
z_max = fmaxf(z_max, z[index + offset]);
offset += stride;
}
// save value in corresponding buffer
x_min_buffer[threadIdx.x] = x_min;
x_max_buffer[threadIdx.x] = x_max;
y_min_buffer[threadIdx.x] = y_min;
y_max_buffer[threadIdx.x] = y_max;
z_min_buffer[threadIdx.x] = z_min;
z_max_buffer[threadIdx.x] = z_max;
// synchronize threads / wait for unfinished threads
__syncthreads();
int i = blockDim.x/2; // assuming blockDim.x is a power of 2!
// reduction within block
while (i != 0) {
if (threadIdx.x < i) {
x_min_buffer[threadIdx.x] = fminf(x_min_buffer[threadIdx.x], x_min_buffer[threadIdx.x + i]);
x_max_buffer[threadIdx.x] = fmaxf(x_max_buffer[threadIdx.x], x_max_buffer[threadIdx.x + i]);
y_min_buffer[threadIdx.x] = fminf(y_min_buffer[threadIdx.x], y_min_buffer[threadIdx.x + i]);
y_max_buffer[threadIdx.x] = fmaxf(y_max_buffer[threadIdx.x], y_max_buffer[threadIdx.x + i]);
z_min_buffer[threadIdx.x] = fminf(z_min_buffer[threadIdx.x], z_min_buffer[threadIdx.x + i]);
z_max_buffer[threadIdx.x] = fmaxf(z_max_buffer[threadIdx.x], z_max_buffer[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
// combining the results and generate the root cell
if (threadIdx.x == 0) {
while (atomicCAS(mutex, 0 ,1) != 0); // lock
*minX = fminf(*minX, x_min_buffer[0]);
*maxX = fmaxf(*maxX, x_max_buffer[0]);
*minY = fminf(*minY, y_min_buffer[0]);
*maxY = fmaxf(*maxY, y_max_buffer[0]);
*minZ = fminf(*minZ, z_min_buffer[0]);
*maxZ = fmaxf(*maxZ, z_max_buffer[0]);
atomicExch(mutex, 0); // unlock
}
}
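// [Illustrative sketch, not part of the original code] Minimal host-side launch of the bounding box kernel
// above; the third launch parameter reserves the dynamic shared memory for the six per-block min/max buffers
// (6 * blockSize floats). The d_-prefixed names and numParticles are hypothetical host variables, blockSize
// must be a power of 2 (see the reduction above), and synchronization/error checking are omitted.
inline void launchComputeBoundingBox(int *d_mutex, float *d_x, float *d_y, float *d_z,
                                     float *d_minX, float *d_maxX, float *d_minY, float *d_maxY,
                                     float *d_minZ, float *d_maxZ, int numParticles) {
    int blockSize = 256;
    int gridSize = (numParticles + blockSize - 1) / blockSize;
    size_t sharedBytes = 6 * blockSize * sizeof(float);
    computeBoundingBoxKernel<<<gridSize, blockSize, sharedBytes>>>(d_mutex, d_x, d_y, d_z,
                                                                   d_minX, d_maxX, d_minY, d_maxY,
                                                                   d_minZ, d_maxZ, numParticles, blockSize);
}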
// (currently) not needed: mass, count, start, child, index, (counter)
__global__ void particlesPerProcessKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m, SubDomainKeyTree *s,
int *procCounter, int *procCounterTemp, int curveType) {
//go over domain list (only the ones inherited by own process) and count particles (using count array)
//BUT: for now use this approach!
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long key;
int proc;
while ((bodyIndex + offset) < n) {
// calculate particle key from particle's position
key = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset],
minX, maxX, minY, maxY, minZ, maxZ, 21);
// get corresponding process
proc = key2proc(key, s, curveType);
// increment corresponding counter
atomicAdd(&procCounter[proc], 1);
offset += stride;
}
}
// (currently) not needed: mass, count, start, child, index, (counter)
__global__ void markParticlesProcessKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m, SubDomainKeyTree *s,
int *procCounter, int *procCounterTemp, int *sortArray, int curveType) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long key;
int proc;
int counter;
while ((bodyIndex + offset) < n) {
// calculate particle key from particle's position
key = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset],
minX, maxX, minY, maxY, minZ, maxZ, 21);
// get corresponding process
proc = key2proc(key, s, curveType);
/*// increment corresponding counter
counter = atomicAdd(&procCounterTemp[proc], 1)
if (proc > 0) {
sortArray[bodyIndex + offset] = procCounter[proc-1] + counter;
}
else {
sortArray[bodyIndex + offset] = counter;
}*/
// mark particle with corresponding process
sortArray[bodyIndex + offset] = proc;
offset += stride;
}
}
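// [Illustrative sketch, not part of the original code] How the two kernels above are typically combined on
// the host: first count the particles per process, then mark each particle with its target process so that a
// subsequent key-value sort over d_sortArray (e.g. thrust::sort_by_key) groups the particle arrays by process
// before the MPI exchange. All d_-prefixed names are hypothetical; synchronization, the sort itself and the
// MPI calls are omitted.
inline void assignParticlesToProcesses(float *d_x, float *d_y, float *d_z, float *d_mass, int *d_count,
                                       int *d_start, int *d_child, int *d_index, float *d_minX, float *d_maxX,
                                       float *d_minY, float *d_maxY, float *d_minZ, float *d_maxZ,
                                       int numParticles, int numNodes, SubDomainKeyTree *d_s,
                                       int *d_procCounter, int *d_procCounterTemp, int *d_sortArray,
                                       int curveType) {
    int blockSize = 256;
    int gridSize = (numParticles + blockSize - 1) / blockSize;
    particlesPerProcessKernel<<<gridSize, blockSize>>>(d_x, d_y, d_z, d_mass, d_count, d_start, d_child,
                                                       d_index, d_minX, d_maxX, d_minY, d_maxY, d_minZ, d_maxZ,
                                                       numParticles, numNodes, d_s, d_procCounter,
                                                       d_procCounterTemp, curveType);
    markParticlesProcessKernel<<<gridSize, blockSize>>>(d_x, d_y, d_z, d_mass, d_count, d_start, d_child,
                                                        d_index, d_minX, d_maxX, d_minY, d_maxY, d_minZ, d_maxZ,
                                                        numParticles, numNodes, d_s, d_procCounter,
                                                        d_procCounterTemp, d_sortArray, curveType);
}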
__global__ void copyArrayKernel(float *targetArray, float *sourceArray, int n) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < n) {
targetArray[bodyIndex + offset] = sourceArray[bodyIndex + offset];
offset += stride;
}
}
//TODO: use template function
__global__ void resetFloatArrayKernel(float *array, float value, int n) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < n) {
array[bodyIndex + offset] = value;
offset += stride;
}
}
//TODO: deletable, but used as print-out/debug kernel
__global__ void debugKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m, SubDomainKeyTree *s, int *procCounter,
float *tempArray, int *sortArray, int *sortArrayOut) {
for (int i=0; i<8; i++) {
printf("child[%i] = %i\n", i, child[i]);
for (int k=0; k<8; k++) {
printf("\tchild[8*child[%i] + %i] = %i\n", i, k, child[8*child[i] + k]);
}
}
}
// Kernel 2: hierarchically subdivides the root cells
__global__ void buildTreeKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
//note: -1 used as "null pointer"
//note: -2 used to lock a child (pointer)
int offset;
bool newBody = true;
float min_x;
float max_x;
float min_y;
float max_y;
float min_z;
float max_z;
int childPath;
int temp;
int tempTemp;
offset = 0;
while ((bodyIndex + offset) < n) {
if (newBody) {
newBody = false;
// copy bounding box
min_x = *minX;
max_x = *maxX;
min_y = *minY;
max_y = *maxY;
min_z = *minZ;
max_z = *maxZ;
temp = 0;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) { // x direction
childPath += 1;
max_x = 0.5 * (min_x + max_x);
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) { // y direction
childPath += 2;
max_y = 0.5 * (min_y + max_y);
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) { // z direction
childPath += 4;
max_z = 0.5 * (min_z + max_z);
}
else {
min_z = 0.5 * (min_z + max_z);
}
}
int childIndex = child[temp*8 + childPath];
// traverse tree until hitting leaf node
while (childIndex >= m) { //n
tempTemp = temp;
temp = childIndex;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) { // x direction
childPath += 1;
max_x = 0.5 * (min_x + max_x);
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) { // y direction
childPath += 2;
max_y = 0.5 * (min_y + max_y);
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) { // z direction
childPath += 4;
max_z = 0.5 * (min_z + max_z);
}
else {
min_z = 0.5 * (min_z + max_z);
}
if (mass[bodyIndex + offset] != 0) {
atomicAdd(&x[temp], mass[bodyIndex + offset] * x[bodyIndex + offset]);
atomicAdd(&y[temp], mass[bodyIndex + offset] * y[bodyIndex + offset]);
atomicAdd(&z[temp], mass[bodyIndex + offset] * z[bodyIndex + offset]);
}
atomicAdd(&mass[temp], mass[bodyIndex + offset]);
atomicAdd(&count[temp], 1);
childIndex = child[8*temp + childPath];
}
// if child is not locked
if (childIndex != -2) {
int locked = temp * 8 + childPath;
if (atomicCAS(&child[locked], childIndex, -2) == childIndex) {
// check whether a body is already stored at the location
if (childIndex == -1) {
//insert body and release lock
child[locked] = bodyIndex + offset;
}
else {
if (childIndex >= n) {
printf("ATTENTION!\n");
}
int patch = 8 * m; //8*n
while (childIndex >= 0 && childIndex < n) { // was n
//create a new cell (by atomically requesting the next unused array index)
int cell = atomicAdd(index, 1);
patch = min(patch, cell);
if (patch != cell) {
child[8 * temp + childPath] = cell;
}
// insert old/original particle
childPath = 0;
if (x[childIndex] < 0.5 * (min_x + max_x)) { childPath += 1; }
if (y[childIndex] < 0.5 * (min_y + max_y)) { childPath += 2; }
if (z[childIndex] < 0.5 * (min_z + max_z)) { childPath += 4; }
x[cell] += mass[childIndex] * x[childIndex];
y[cell] += mass[childIndex] * y[childIndex];
z[cell] += mass[childIndex] * z[childIndex];
mass[cell] += mass[childIndex];
count[cell] += count[childIndex];
child[8 * cell + childPath] = childIndex;
start[cell] = -1;
// insert new particle
tempTemp = temp;
temp = cell;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) {
childPath += 1;
max_x = 0.5 * (min_x + max_x);
} else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) {
childPath += 2;
max_y = 0.5 * (min_y + max_y);
} else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) {
childPath += 4;
max_z = 0.5 * (min_z + max_z);
} else {
min_z = 0.5 * (min_z + max_z);
}
// COM / preparing for calculation of COM
if (mass[bodyIndex + offset] != 0) {
x[cell] += mass[bodyIndex + offset] * x[bodyIndex + offset];
y[cell] += mass[bodyIndex + offset] * y[bodyIndex + offset];
z[cell] += mass[bodyIndex + offset] * z[bodyIndex + offset];
mass[cell] += mass[bodyIndex + offset];
}
count[cell] += count[bodyIndex + offset];
childIndex = child[8 * temp + childPath];
}
child[8 * temp + childPath] = bodyIndex + offset;
__threadfence(); // written to global memory arrays (child, x, y, mass) thus need to fence
child[locked] = patch;
}
offset += stride;
newBody = true;
}
}
__syncthreads();
}
}
// (currently) not needed: start
// idea: assign already existing domain list nodes and add missing ones
__global__ void buildDomainTreeKernel(int *domainListIndex, unsigned long *domainListKeys, int *domainListLevels,
int *domainListIndices, float *x, float *y, float *z, float *mass, float *minX,
float *maxX, float *minY, float *maxY, float *minZ, float *maxZ, int *count,
int *start, int *child, int *index, int n, int m) {
int domainListCounter = 0;
//char keyAsChar[21 * 2 + 3];
int path[21];
float min_x, max_x, min_y, max_y, min_z, max_z;
int currentChild;
int childPath;
bool insert = true;
int childIndex;
int temp;
// loop over domain list indices (over the keys found/generated by createDomainListKernel)
for (int i = 0; i < *domainListIndex; i++) {
//key2Char(domainListKeys[i], 21, keyAsChar);
//printf("buildDomainTree: domainListLevels[%i] = %i\n", i, domainListLevels[i]);
//printf("domain: domainListKeys[%i] = %lu = %s (level: %i)\n", i, domainListKeys[i], keyAsChar, domainListLevels[i]);
childIndex = 0;
//temp = 0;
// iterate through levels (of corresponding domainListIndex)
for (int j = 0; j < domainListLevels[i]; j++) {
path[j] = (int) (domainListKeys[i] >> (21 * 3 - 3 * (j + 1)) & (int)7);
temp = childIndex;
childIndex = child[8*childIndex + path[j]];
if (childIndex < n) {
if (childIndex == -1 /*&& childIndex < n*/) {
// no child at all here, thus add node
int cell = atomicAdd(index, 1);
child[8 * temp + path[j]] = cell;
childIndex = cell;
domainListIndices[domainListCounter] = childIndex; //cell;
domainListCounter++;
} else {
// child is a leaf, thus add node in between
int cell = atomicAdd(index, 1);
child[8 * /*childIndex*/temp + path[j]] = cell;
//printf("\tchild[8*%i + %i] = %i\n", temp, path[j], cell);
min_x = *minX;
max_x = *maxX;
min_y = *minY;
max_y = *maxY;
min_z = *minZ;
max_z = *maxZ;
for (int k=0; k<=j; k++) {
currentChild = path[k];
//printf("adding path[%i] = %i (j = %i)\n", k, path[k], j);
if (currentChild % 2 != 0) {
max_x = 0.5 * (min_x + max_x);
currentChild -= 1;
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (currentChild % 2 == 0 && currentChild % 4 != 0) {
max_y = 0.5 * (min_y + max_y);
currentChild -= 2;
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (currentChild == 4) {
max_z = 0.5 * (min_z + max_z);
currentChild -= 4;
}
else {
min_z = 0.5 * (min_z + max_z);
}
//printf("\t\t currentChild[%i] = %i %i\n", k, currentChild, path[k]);
}
// insert old/original particle
childPath = 0; //(int) (domainListKeys[i] >> (21 * 3 - 3 * ((j+1) + 1)) & (int)7); //0; //currentChild; //0;
if (x[childIndex] < 0.5 * (min_x + max_x)) {
childPath += 1;
//max_x = 0.5 * (min_x + max_x);
}
//else {
// min_x = 0.5 * (min_x + max_x);
//}
if (y[childIndex] < 0.5 * (min_y + max_y)) {
childPath += 2;
//max_y = 0.5 * (min_y + max_y);
}
//else {
// min_y = 0.5 * (min_y + max_y);
//}
if (z[childIndex] < 0.5 * (min_z + max_z)) {
childPath += 4;
//max_z = 0.5 * (min_z + max_z);
}
//else {
// min_z = 0.5 * (min_z + max_z);
//}
x[cell] += mass[childIndex] * x[childIndex];
y[cell] += mass[childIndex] * y[childIndex];
z[cell] += mass[childIndex] * z[childIndex];
mass[cell] += mass[childIndex];
//printf("path = %i\n", (int) (domainListKeys[i] >> (21 * 3 - 3 * ((j+1) + 1)) & (int)7));
//printf("j = %i, domainListLevels[%i] = %i\n", j, i, domainListLevels[i]);
printf("adding node in between for index %i cell = %i (childPath = %i, j = %i)! x = (%f, %f, %f)\n",
childIndex, cell, childPath, j, x[childIndex], y[childIndex], z[childIndex]);
//for (int l=0; l<=j; l++) {
// printf("\tpath[%i] = %i\n", l, path[l]);
//}
child[8 * cell + childPath] = childIndex;
//printf("child[8 * %i + %i] = %i\n", cell, childPath, childIndex);
childIndex = cell;
domainListIndices[domainListCounter] = childIndex; //temp;
domainListCounter++;
}
}
else {
insert = true;
// check whether node already marked as domain list node
for (int k=0; k<domainListCounter; k++) {
if (childIndex == domainListIndices[k]) {
insert = false;
break;
}
}
if (insert) {
// mark/save node as domain list node
domainListIndices[domainListCounter] = childIndex; //temp;
domainListCounter++;
}
}
}
}
//printf("domainListCounter = %i\n", domainListCounter);
}
// extract lowest domain list nodes from domain list nodes
// lowest domain list node = domain list node with children not being domain list nodes!
__global__ void lowestDomainListNodesKernel(int *domainListIndices, int *domainListIndex,
unsigned long *domainListKeys,
int *lowestDomainListIndices, int *lowestDomainListIndex,
unsigned long *lowestDomainListKeys,
float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int n, int m, int *procCounter) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
bool lowestDomainListNode;
int domainIndex;
int lowestDomainIndex;
int childIndex;
// check all domain list nodes
while ((bodyIndex + offset) < *domainListIndex) {
lowestDomainListNode = true;
// get domain list index of current domain list node
domainIndex = domainListIndices[bodyIndex + offset];
// check all children
for (int i=0; i<8; i++) {
childIndex = child[8 * domainIndex + i];
// check whether child exists
if (childIndex != -1) {
// check whether child is a node
if (childIndex >= n) {
// check if this node is a domain list node
for (int k=0; k<*domainListIndex; k++) {
if (childIndex == domainListIndices[k]) {
//printf("domainIndex = %i childIndex: %i domainListIndices: %i\n", domainIndex,
// childIndex, domainListIndices[k]);
lowestDomainListNode = false;
break;
}
}
// one child being a domain list node is sufficient for not being a lowest domain list node
if (!lowestDomainListNode) {
break;
}
}
}
}
if (lowestDomainListNode) {
// increment lowest domain list counter/index
lowestDomainIndex = atomicAdd(lowestDomainListIndex, 1);
// add/save index of lowest domain list node
lowestDomainListIndices[lowestDomainIndex] = domainIndex;
// add/save key of lowest domain list node
lowestDomainListKeys[lowestDomainIndex] = domainListKeys[bodyIndex + offset];
// debugging
//printf("Adding lowest domain list node #%i (key = %lu)\n", lowestDomainIndex,
// lowestDomainListKeys[lowestDomainIndex]);
}
offset += stride;
}
}
//for debugging purposes
__global__ void treeInfoKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m, int *procCounter, SubDomainKeyTree *s,
int *sortArray, int *sortArrayOut) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
unsigned long key;
int toCheck;
int toCheckSorted;
int proc;
int offset = 0;
// ---- check whether particles exist that do not belong to this process
/*while ((bodyIndex + offset) < procCounter[0]) {
key = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset], minX, maxX,
minY, maxY, minZ, maxZ, 21);
proc = key2proc(key, s);
if (proc != s->rank) {
printf("ATTENTION: myrank = %i and proc = %i (bodyIndex + offset = %i)\n", s->rank, proc,
bodyIndex + offset);
}
offset += stride;
}*/
// ----------------------------------------------------------------------
while ((bodyIndex + offset) < 8) {
printf("rank[%i] count[%i] = %i\n", s->rank, bodyIndex+offset, count[child[bodyIndex+offset]]);
offset += stride;
}
/*// ---- general information about particles ....
while ((bodyIndex + offset) < n) {
if ((bodyIndex + offset) % 100000 == 0) {
printf("particle[%i]: x = (%f, %f, %f) m = %f\n", bodyIndex+offset, x[bodyIndex+offset],
y[bodyIndex+offset], z[bodyIndex+offset], mass[bodyIndex+offset]);
}
offset += stride;
}
// ----------------------------------------------------------------------*/
}
__global__ void domainListInfoKernel(float *x, float *y, float *z, float *mass, int *child, int *index, int n,
int *domainListIndices, int *domainListIndex,
int *domainListLevels, int *lowestDomainListIndices,
int *lowestDomainListIndex, SubDomainKeyTree *s) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < *domainListIndex) {
/*printf("[rank %i] domainListIndices[%i] = %i x = (%f, %f, %f) m = %f\n", s->rank, bodyIndex + offset,
domainListIndices[bodyIndex + offset], x[domainListIndices[bodyIndex + offset]],
y[domainListIndices[bodyIndex + offset]], z[domainListIndices[bodyIndex + offset]],
mass[domainListIndices[bodyIndex + offset]]);*/
/*if (mass[domainListIndices[bodyIndex + offset]] == 0.f) {
for (int i=0; i<8; i++) {
printf("[rank %i] domainListIndices[%i] child[%i] = %i\n", s->rank, bodyIndex + offset, i,
child[8*domainListIndices[bodyIndex + offset] + i]);
}
}*/
offset += stride;
}
}
// convert key (unsigned long) to more readable level-wise (and separated) string/char-array
__device__ void key2Char(unsigned long key, int maxLevel, char *keyAsChar) {
int level[21];
for (int i=0; i<maxLevel; i++) {
level[i] = (int)(key >> (maxLevel*3 - 3*(i+1)) & (int)7);
}
    for (int i=0; i<maxLevel; i++) {
        keyAsChar[2*i] = level[i] + '0';
        keyAsChar[2*i+1] = '|';
    }
    keyAsChar[2*maxLevel] = '\0'; // terminate within the allocated buffer (21 * 2 + 3 chars)
}
/*// table needed to convert from Lebesgue to Hilbert keys
__device__ const unsigned char DirTable[12][8] =
{ { 8,10, 3, 3, 4, 5, 4, 5}, { 2, 2,11, 9, 4, 5, 4, 5},
{ 7, 6, 7, 6, 8,10, 1, 1}, { 7, 6, 7, 6, 0, 0,11, 9},
{ 0, 8, 1,11, 6, 8, 6,11}, {10, 0, 9, 1,10, 7, 9, 7},
{10, 4, 9, 4,10, 2, 9, 3}, { 5, 8, 5,11, 2, 8, 3,11},
{ 4, 9, 0, 0, 7, 9, 2, 2}, { 1, 1, 8, 5, 3, 3, 8, 6},
{11, 5, 0, 0,11, 6, 2, 2}, { 1, 1, 4,10, 3, 3, 7,10} };
// table needed to convert from Lebesgue to Hilbert keys
__device__ const unsigned char HilbertTable[12][8] = { {0,7,3,4,1,6,2,5}, {4,3,7,0,5,2,6,1}, {6,1,5,2,7,0,4,3},
{2,5,1,6,3,4,0,7}, {0,1,7,6,3,2,4,5}, {6,7,1,0,5,4,2,3},
{2,3,5,4,1,0,6,7}, {4,5,3,2,7,6,0,1}, {0,3,1,2,7,4,6,5},
{2,1,3,0,5,6,4,7}, {4,7,5,6,3,0,2,1}, {6,5,7,4,1,2,0,3} };*/
// convert Lebesgue key to Hilbert key
__device__ unsigned long Lebesgue2Hilbert(unsigned long lebesgue, int maxLevel) {
unsigned long hilbert = 0UL;
int dir = 0;
for (int lvl=maxLevel; lvl>0; lvl--) {
unsigned long cell = (lebesgue >> ((lvl-1)*3)) & (unsigned long)((1<<3)-1);
hilbert = hilbert << 3;
if (lvl > 0) {
hilbert += HilbertTable[dir][cell];
}
dir = DirTable[dir][cell];
}
return hilbert;
}
// calculate the particle key (Lebesgue) per particle based on its position (resulting in an overdetermined key)
__device__ unsigned long getParticleKeyPerParticle(float x, float y, float z,
float *minX, float *maxX, float *minY,
float *maxY, float *minZ, float *maxZ,
int maxLevel) {
int level = 0;
unsigned long particleKey = 0UL;
int sonBox = 0;
float min_x = *minX;
float max_x = *maxX;
float min_y = *minY;
float max_y = *maxY;
float min_z = *minZ;
float max_z = *maxZ;
// calculate path to the particle's position assuming an octree with above bounding boxes
    while (level < maxLevel) { // one octant (3 bits) per level; level == maxLevel would yield a negative shift below
sonBox = 0;
// find insertion point for body
if (x < 0.5 * (min_x+max_x)) {
sonBox += 1;
max_x = 0.5 * (min_x+max_x);
}
else { min_x = 0.5 * (min_x+max_x); }
if (y < 0.5 * (min_y+max_y)) {
sonBox += 2;
max_y = 0.5 * (min_y + max_y);
}
else { min_y = 0.5 * (min_y + max_y); }
if (z < 0.5 * (min_z+max_z)) {
sonBox += 4;
max_z = 0.5 * (min_z + max_z);
}
else { min_z = 0.5 * (min_z + max_z); }
particleKey = particleKey | ((unsigned long)sonBox << (unsigned long)(3 * (maxLevel-level-1)));
level ++;
}
//TODO: Hilbert change
return particleKey;
//return Lebesgue2Hilbert(particleKey, 21);
}
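/* Illustrative example (not from the original code) of the resulting key layout:
   each level contributes one octant index (3 bits), with level 0 occupying the most
   significant triple of the 63 used bits. A particle that falls into octant 5 at level 0,
   octant 3 at level 1 and octant 0 at all deeper levels would get

       101 | 011 | 000 | ... | 000      (21 triples = 63 bits)

   which corresponds to
       unsigned long key = (5UL << 60) | (3UL << 57);
   The concrete octants above are made up for illustration only. */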
// only for testing...
// calculating the key for all particles
__global__ void getParticleKeyKernel(float *x, float *y, float *z, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, unsigned long *key, int maxLevel, int n, SubDomainKeyTree *s) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long particleKey;
unsigned long hilbertParticleKey;
//char keyAsChar[21 * 2 + 3];
/*//debugging
if (bodyIndex == 0) {
char rangeAsChar[21 * 2 + 3];
for (int i=0; i<(s->numProcesses + 1); i++) {
key2Char(s->range[i], 21, rangeAsChar);
printf("range[%i] = %lu (%s)\n", i, s->range[i], rangeAsChar);
}
}
//end: debugging*/
while (bodyIndex + offset < n) {
particleKey = 0UL;
particleKey = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset],
minX, maxX, minY, maxY, minZ, maxZ, maxLevel);
//char keyAsChar[21 * 2 + 3];
hilbertParticleKey = Lebesgue2Hilbert(particleKey, 21);
key[bodyIndex + offset] = particleKey; //hilbertParticleKey;
//int proc = key2proc(particleKey, s);
//key2Char(testKey, 21, keyAsChar);
//key2Char(hilbertParticleKey, 21, keyAsChar);
//if ((bodyIndex + offset) % 5000 == 0) {
//printf("key[%i]: %lu\n", bodyIndex + offset, testKey);
//for (int proc=0; proc<=s->numProcesses; proc++) {
// printf("range[%i] = %lu\n", proc, s->range[proc]);
//}
//printf("key[%i]: %s = %lu (proc = %i)\n", bodyIndex + offset, keyAsChar, particleKey, proc);
//}
offset += stride;
}
}
// get the corresponding process of a key (using the range within the SubDomainKeyTree)
__device__ int key2proc(unsigned long k, SubDomainKeyTree *s, int curveType) {
if (curveType == 0) {
for (int proc=0; proc<s->numProcesses; proc++) {
if (k >= s->range[proc] && k < s->range[proc+1]) {
return proc;
}
}
}
else {
unsigned long hilbert = Lebesgue2Hilbert(k, 21);
for (int proc = 0; proc < s->numProcesses; proc++) {
if (hilbert >= s->range[proc] && hilbert < s->range[proc + 1]) {
return proc;
}
}
}
//printf("ERROR: key2proc(k=%lu): -1!", k);
return -1; // error
}
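/* Illustrative example (assumed values, not from the original code): for two processes with
   s->range = { 0UL, 1UL << 62, KEY_MAX } the mapping is
       process 0 owns keys in [0, 1UL << 62)
       process 1 owns keys in [1UL << 62, KEY_MAX)
   so e.g.
       unsigned long k = 3UL << 60;      // lies below (1UL << 62)
       int owner = key2proc(k, s, 0);    // -> 0 for the Lebesgue curve (curveType == 0)
*/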
// Traversing the tree iteratively using an explicit stack
// not used (yet)
__global__ void traverseIterativeKernel(float *x, float *y, float *z, float *mass, int *child, int n, int m,
SubDomainKeyTree *s, int maxLevel) {
// starting traversing with the child[0, ..., 7] representing the first level of the tree
// explicit stack using shared memory
__shared__ int stack[128];
__shared__ int *stackPtr;
stackPtr = stack;
*stackPtr++ = NULL;
int childIndex;
int node;
int particleCounter = 0;
for (int j=0; j<8; j++) {
childIndex;
node = n;
stack[0] = child[j];
stackPtr = stack;
//counter = 0;
while (node != NULL /*&& counter < 200000*/) {
//counter++;
childIndex = *stackPtr;
for (int i=0; i<8; i++) {
if (child[8*childIndex + i] == -1) { /*do nothing*/ }
else {
if (child[8*childIndex + i] < n) {
particleCounter++;
}
else {
*stackPtr++ = child[8*childIndex + i]; //push
}
}
}
node = *--stackPtr; //pop
}
}
printf("Finished traversing iteratively! particleCounter = %i\n", particleCounter);
}
// get the domain list keys (and levels) resulting from ranges (within the SubDomainKeyTree)
// domain list nodes = common coarse tree for all processes
__global__ void createDomainListKernel(SubDomainKeyTree *s, int maxLevel, unsigned long *domainListKeys, int *levels,
int *index, int curveType) {
char keyAsChar[21 * 2 + 3];
// workaround for fixing bug... in principle: unsigned long keyMax = (1 << 63) - 1;
unsigned long shiftValue = 1;
unsigned long toShift = 63;
unsigned long keyMax = (shiftValue << toShift) - 1; // 1 << 63 not working!
//key2Char(keyMax, 21, keyAsChar); //printf("keyMax: %lu = %s\n", keyMax, keyAsChar);
unsigned long key2test = 0UL;
int level = 0;
level++;
// in principle: traversing a (non-existent) octree by walking the 1D spacefilling curve (keys of the tree nodes)
while (key2test < keyMax) {
if (isDomainListNode(key2test & (~0UL << (3 * (maxLevel - level + 1))), maxLevel, level-1, s, curveType)) {
// add domain list key
domainListKeys[*index] = key2test;
// add domain list level
levels[*index] = level;
*index += 1;
if (isDomainListNode(key2test, maxLevel, level, s, curveType)) {
level++;
}
else {
key2test = key2test + (1UL << 3 * (maxLevel - level));
}
} else {
level--;
// not necessary... 1 = 1
//key2test = keyMaxLevel(key2test & (~0UL << (3 * (maxLevel - level))), maxLevel, level, s) + 1 - (1UL << (3 * (maxLevel - level)));
}
}
//for (int i=0; i < *index; i++) {
// key2Char(domainListKeys[i], 21, keyAsChar);
//}
}
// check whether node is a domain list node
__device__ bool isDomainListNode(unsigned long key, int maxLevel, int level, SubDomainKeyTree *s, int curveType) {
int p1 = key2proc(key, s, curveType);
int p2 = key2proc(key | ~(~0UL << 3*(maxLevel-level)), s, curveType);
if (p1 != p2) {
return true;
}
else {
return false;
}
}
// get the maximal key of a key regarding a specific level
__device__ unsigned long keyMaxLevel(unsigned long key, int maxLevel, int level, SubDomainKeyTree *s) {
unsigned long keyMax = key | ~(~0UL << 3*(maxLevel-level));
return keyMax;
}
__global__ void prepareLowestDomainExchangeKernel(float *entry, float *mass, float *tempArray, int *lowestDomainListIndices,
int *lowestDomainListIndex, unsigned long *lowestDomainListKeys,
int *counter) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int index;
int lowestDomainIndex;
//copy x, y, z, mass of lowest domain list nodes into arrays
//sorting using cub (not here)
while ((bodyIndex + offset) < *lowestDomainListIndex) {
//if (bodyIndex + offset == 0) {
// printf("lowestDomainListIndex = %i\n", *lowestDomainListIndex);
//}
lowestDomainIndex = lowestDomainListIndices[bodyIndex + offset];
if (lowestDomainIndex >= 0) {
tempArray[bodyIndex+offset] = entry[lowestDomainIndex];
}
offset += stride;
}
//serial solution
/*for (int i=0; i<*lowestDomainListIndex; i++) {
tempArray[i] = entry[lowestDomainListIndices[i]];
}*/
}
//TODO: it is not necessary to calculate the moment (x_i * m), thus just use prepareLowestDomainExchangeKernel?
__global__ void prepareLowestDomainExchangeMassKernel(float *mass, float *tempArray, int *lowestDomainListIndices,
int *lowestDomainListIndex, unsigned long *lowestDomainListKeys,
int *counter) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int index;
int lowestDomainIndex;
//copy x, y, z, mass of lowest domain list nodes into arrays
//sorting using cub (not here)
while ((bodyIndex + offset) < *lowestDomainListIndex) {
lowestDomainIndex = lowestDomainListIndices[bodyIndex + offset];
if (lowestDomainIndex >= 0) {
tempArray[bodyIndex + offset] = mass[lowestDomainIndex];
printf("lowestDomainListIndex[%i]: mass = %f\n", bodyIndex+offset, tempArray[bodyIndex + offset]);
}
offset += stride;
}
}
//TODO: problem since not deterministic? keys are not unique
// at least the domain list nodes in general, but the lowest domain list nodes as well?
__global__ void updateLowestDomainListNodesKernel(float *tempArray, float *entry, int *lowestDomainListIndices,
int *lowestDomainListIndex, unsigned long *lowestDomainListKeys,
unsigned long *sortedLowestDomainListKeys, int *counter) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int originalIndex = -1;
    while ((bodyIndex + offset) < *lowestDomainListIndex) {
        originalIndex = -1; // reset per node so a stale match from the previous iteration is not reused
        for (int i=0; i<*lowestDomainListIndex; i++) {
if (sortedLowestDomainListKeys[bodyIndex + offset] == lowestDomainListKeys[i]) {
originalIndex = i;
//break;
}
}
        if (originalIndex == -1) {
            printf("ATTENTION: originalIndex = -1 (key = %lu)!\n", sortedLowestDomainListKeys[bodyIndex + offset]);
        }
        else {
            entry[lowestDomainListIndices[originalIndex]] = tempArray[bodyIndex + offset];
        }
offset += stride;
}
}
__global__ void compLowestDomainListNodesKernel(float *x, float *y, float *z, float *mass, int *lowestDomainListIndices,
int *lowestDomainListIndex, unsigned long *lowestDomainListKeys,
unsigned long *sortedLowestDomainListKeys, int *counter) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int lowestDomainIndex;
while ((bodyIndex + offset) < *lowestDomainListIndex) {
lowestDomainIndex = lowestDomainListIndices[bodyIndex + offset];
if (mass[lowestDomainIndex] != 0) {
x[lowestDomainIndex] /= mass[lowestDomainIndex];
y[lowestDomainIndex] /= mass[lowestDomainIndex];
z[lowestDomainIndex] /= mass[lowestDomainIndex];
}
// debugging
//printf("lowestDomainIndex = %i x = (%f, %f, %f) m = %f (key: %lu)\n", lowestDomainIndex, x[lowestDomainIndex],
// y[lowestDomainIndex], z[lowestDomainIndex], mass[lowestDomainIndex], lowestDomainListKeys[bodyIndex + offset]);
offset += stride;
}
}
__global__ void zeroDomainListNodesKernel(int *domainListIndex, int *domainListIndices,
int *lowestDomainListIndex, int *lowestDomainListIndices,
float *x, float *y, float *z, float *mass) {
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
int domainIndex;
bool zero;
while ((bodyIndex + offset) < *domainListIndex) {
zero = true;
domainIndex = domainListIndices[bodyIndex + offset];
        for (int i=0; i<*lowestDomainListIndex; i++) { // check against all lowest domain list nodes
            if (domainIndex == lowestDomainListIndices[i]) {
zero = false;
}
}
if (zero) {
x[domainIndex] = 0.f;
y[domainIndex] = 0.f;
z[domainIndex] = 0.f;
mass[domainIndex] = 0.f;
}
offset += stride;
}
}
//TODO: lowest domain list nodes or domain list nodes?
__global__ void compLocalPseudoParticlesParKernel(float *x, float *y, float *z, float *mass, int *index, int n,
int *domainListIndices, int *domainListIndex,
int *lowestDomainListIndices, int *lowestDomainListIndex) {
//equivalent to centreOfMassKernel !?
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
bool domainList;
//note: most of it already done within buildTreeKernel
bodyIndex += n;
while (bodyIndex + offset < *index) {
domainList = false;
for (int i=0; i<*domainListIndex; i++) {
if ((bodyIndex + offset) == domainListIndices[i]) {
domainList = true; // hence do not insert
//for (int j=0; j<*lowestDomainListIndex; j++) {
// if ((bodyIndex + offset) == lowestDomainListIndices[j]) {
// domainList = false;
// break;
// }
//}
break;
}
}
if (mass[bodyIndex + offset] != 0 && !domainList) {
x[bodyIndex + offset] /= mass[bodyIndex + offset];
y[bodyIndex + offset] /= mass[bodyIndex + offset];
z[bodyIndex + offset] /= mass[bodyIndex + offset];
}
offset += stride;
}
}
//TODO: check functionality
__global__ void compDomainListPseudoParticlesParKernel(float *x, float *y, float *z, float *mass, int *child, int *index, int n,
int *domainListIndices, int *domainListIndex,
int *domainListLevels, int *lowestDomainListIndices,
int *lowestDomainListIndex) {
//calculate position (center of mass) and mass for domain list nodes
//Problem: start with "deepest" nodes
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset;
int domainIndex;
int level = 21; // max level
bool compute;
// go from max level to level=0
while (level >= 0) {
offset = 0;
compute = true;
while ((bodyIndex + offset) < *domainListIndex) {
compute = true;
domainIndex = domainListIndices[bodyIndex + offset];
for (int i=0; i<*lowestDomainListIndex; i++) {
if (domainIndex == lowestDomainListIndices[i]) {
compute = false;
}
}
if (compute && domainListLevels[bodyIndex + offset] == level) {
// do the calculation
                for (int i=0; i<8; i++) {
                    // skip empty children (-1) to avoid reading x[-1]/mass[-1]
                    if (child[8*domainIndex + i] != -1) {
                        x[domainIndex] += x[child[8*domainIndex + i]] * mass[child[8*domainIndex + i]];
                        y[domainIndex] += y[child[8*domainIndex + i]] * mass[child[8*domainIndex + i]];
                        z[domainIndex] += z[child[8*domainIndex + i]] * mass[child[8*domainIndex + i]];
                        mass[domainIndex] += mass[child[8*domainIndex + i]];
                    }
                }
if (mass[domainIndex] != 0) {
x[domainIndex] /= mass[domainIndex];
y[domainIndex] /= mass[domainIndex];
z[domainIndex] /= mass[domainIndex];
}
// debugging
//printf("domain node: key = %lu x = (%f, %f, %f) m = %f\n", domainListIndices[bodyIndex + offset],
// x[domainIndex], y[domainIndex], z[domainIndex], mass[domainIndex]);
}
offset += stride;
}
__syncthreads();
level--;
}
}
// Kernel 3: computes the COM for each cell
__global__ void centreOfMassKernel(float *x, float *y, float *z, float *mass, int *index, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
//note: most of it already done within buildTreeKernel
bodyIndex += n;
while (bodyIndex + offset < *index) {
if (mass[bodyIndex + offset] == 0) {
printf("centreOfMassKernel: mass = 0 (%i)!\n", bodyIndex + offset);
}
        if (mass[bodyIndex + offset] != 0) {
x[bodyIndex + offset] /= mass[bodyIndex + offset];
y[bodyIndex + offset] /= mass[bodyIndex + offset];
z[bodyIndex + offset] /= mass[bodyIndex + offset];
}
offset += stride;
}
}
// Kernel 4: sorts the bodies
__global__ void sortKernel(int *count, int *start, int *sorted, int *child, int *index, int n, int m)
{
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
if (bodyIndex == 0) {
int sumParticles = 0;
for (int i=0; i<8; i++) {
sumParticles += count[child[i]];
}
printf("sumParticles = %i\n", sumParticles);
}
int s = 0;
if (threadIdx.x == 0) {
for (int i=0; i<8; i++){
int node = child[i];
// not a leaf node
if (node >= m) { //n
start[node] = s;
s += count[node];
}
// leaf node
else if (node >= 0) {
sorted[s] = node;
s++;
}
}
}
int cell = m + bodyIndex;
int ind = *index;
//int counter = 0; // for debugging purposes or rather to achieve the kernel to be finished
while ((cell + offset) < ind /*&& counter < 100000*/) {
//counter++;
//if (counter > 99998) {
//printf("cell + offset = %i\n", cell+offset);
//}
s = start[cell + offset];
if (s >= 0) {
for (int i=0; i<8; i++) {
int node = child[8*(cell+offset) + i];
// not a leaf node
if (node >= m) { //m
start[node] = s;
s += count[node];
}
// leaf node
else if (node >= 0) {
sorted[s] = node;
s++;
}
}
offset += stride;
}
}
}
// Kernel 5: computes the (gravitational) forces
__global__ void computeForcesKernel(float* x, float *y, float *z, float *vx, float *vy, float *vz,
float *ax, float *ay, float *az, float *mass,
int *sorted, int *child, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int n, int m,
float g, int blockSize, int warp, int stackSize, SubDomainKeyTree *s)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
//debug
unsigned long key;
//__shared__ float depth[stackSize * blockSize/warp];
// stack controlled by one thread per warp
//__shared__ int stack[stackSize * blockSize/warp];
    extern __shared__ float buffer[];
    float* depth = (float*)buffer;
    // store node indices as ints (keeping them in floats would round large indices)
    int* stack = (int*)&depth[stackSize * blockSize/warp];
float x_radius = 0.5*(*maxX - (*minX));
float y_radius = 0.5*(*maxY - (*minY));
float z_radius = 0.5*(*maxZ - (*minZ));
float radius_max = fmaxf(x_radius, y_radius);
float radius = fmaxf(radius_max, z_radius);
    // count the non-null root children to determine the initial stack top offset
int jj = -1;
for (int i=0; i<8; i++) {
if (child[i] != -1) {
jj++;
}
}
int counter = threadIdx.x % warp;
int stackStartIndex = stackSize*(threadIdx.x / warp);
while ((bodyIndex + offset) < m) {
int sortedIndex = sorted[bodyIndex + offset];
float pos_x = x[sortedIndex];
float pos_y = y[sortedIndex];
float pos_z = z[sortedIndex];
float acc_x = 0.0;
float acc_y = 0.0;
float acc_z = 0.0;
// initialize stack
int top = jj + stackStartIndex;
if (counter == 0) {
int temp = 0;
for (int i=0; i<8; i++) {
// if child is not locked
if (child[i] != -1) {
stack[stackStartIndex + temp] = child[i];
depth[stackStartIndex + temp] = radius*radius/theta;
temp++;
}
}
}
__syncthreads();
// while stack is not empty / more nodes to visit
while (top >= stackStartIndex) {
int node = stack[top];
//debug
//if (node > n && node < m) {
// printf("PARALLEL FORCE! (node = %i x = (%f, %f, %f) m = %f)\n", node, x[node], y[node], z[node],
// mass[node]);
//}
//end: debug
float dp = 0.25*depth[top]; // float dp = depth[top];
for (int i=0; i<8; i++) {
int ch = child[8*node + i];
//__threadfence();
if (ch >= 0) {
float dx = x[ch] - pos_x;
float dy = y[ch] - pos_y;
float dz = z[ch] - pos_z;
float r = dx*dx + dy*dy + dz*dz + eps_squared;
//unsigned activeMask = __activemask();
//if (ch < n /*is leaf node*/ || !__any_sync(activeMask, dp > r)) {
if (ch < n /*is leaf node*/ || __all_sync(__activemask(), dp <= r)) {
/*//debug
key = getParticleKeyPerParticle(x[ch], y[ch], z[ch], minX, maxX, minY, maxY,
minZ, maxZ, 21);
if (key2proc(key, s) != s->rank) {
printf("Parallel force! child = %i x = (%f, %f, %f) mass = %f\n", ch, x[ch], y[ch], z[ch], mass[ch]);
}
//end: debug*/
// calculate interaction force contribution
r = rsqrt(r);
float f = mass[ch] * r * r * r;
acc_x += f*dx;
acc_y += f*dy;
acc_z += f*dz;
}
else {
// if first thread in warp: push node's children onto iteration stack
if (counter == 0) {
stack[top] = ch;
depth[top] = dp; // depth[top] = 0.25*dp;
}
top++; // descend to next tree level
//__threadfence();
}
}
else { /*top = max(stackStartIndex, top-1); */}
}
top--;
}
// update body data
ax[sortedIndex] = acc_x;
ay[sortedIndex] = acc_y;
az[sortedIndex] = acc_z;
offset += stride;
__syncthreads();
}
}
// calculating the distance between two nodes (based on their stored positions)
__device__ float smallestDistance(float* x, float *y, float *z, int node1, int node2) {
float dx;
if (x[node1] < x[node2]) {
dx = x[node2] - x[node1];
}
else if (x[node1] > x[node2]) {
dx = x[node1] - x[node2];
}
else {
dx = 0.f;
}
float dy;
if (y[node1] < y[node2]) {
dy = y[node2] - y[node1];
}
else if (y[node1] > y[node2]) {
dy = y[node1] - y[node2];
}
else {
dy = 0.f;
}
float dz;
if (z[node1] < z[node2]) {
dz = z[node2] - z[node1];
}
else if (z[node1] > z[node2]) {
dz = z[node1] - z[node2];
}
else {
dz = 0.f;
}
return sqrtf(dx*dx + dy*dy + dz*dz);
}
//copy non-contiguous array elements into another array contiguously (in order to send them via MPI)
// e.g.: [5, 6, 3, 6, 6, 8] -> relevant indices = [1, 5] -> [6, 8]
__global__ void collectSendIndicesKernel(int *sendIndices, float *entry, float *tempArray, int *domainListCounter,
int sendCount) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int insertIndex;
while ((bodyIndex + offset) < sendCount) {
tempArray[bodyIndex + offset] = entry[sendIndices[bodyIndex + offset]];
offset += stride;
}
}
//ATTENTION: causes duplicate entries, which need to be removed afterwards
__global__ void symbolicForceKernel(int relevantIndex, float *x, float *y, float *z, float *mass, float *minX, float *maxX, float *minY,
float *maxY, float *minZ, float *maxZ, int *child, int *domainListIndex,
unsigned long *domainListKeys, int *domainListIndices, int *domainListLevels,
int *domainListCounter, int *sendIndices, int *index, int *particleCounter,
SubDomainKeyTree *s, int n, int m, float diam, float theta_, int *mutex,
int *relevantDomainListIndices) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
float r;
int insertIndex;
bool insert;
int level;
int childIndex;
//bool redo = false;
while ((bodyIndex + offset) < *index) {
//if ((bodyIndex + offset) == 0) {
// printf("relevantIndex: %i\n", relevantDomainListIndices[relevantIndex]);
//}
insert = true;
//redo = false;
for (int i=0; i<*domainListIndex; i++) {
if ((bodyIndex + offset) == domainListIndices[i]) {
insert = false;
break;
}
}
//if (mass[relevantDomainListIndices[relevantIndex]] == 0) {
// insert = false;
//}
// TODO: CHANGED: relevantIndex -> relevantDomainListIndices[relevantIndex]
if (insert && (bodyIndex + offset) != relevantDomainListIndices[relevantIndex] && ((bodyIndex + offset) < particleCounter[s->rank] || (bodyIndex + offset) > n)) {
//r = smallestDistance(x, y, z, bodyIndex + offset, relevantDomainListIndices[relevantIndex]); //relevantIndex, bodyIndex + offset);
r = smallestDistance(x, y, z, relevantDomainListIndices[relevantIndex], bodyIndex + offset);
//calculate tree level by determining the particle's key and traversing the tree until hitting that particle
level = getTreeLevel(bodyIndex + offset, child, x, y, z, minX, maxX, minY, maxY, minZ, maxZ);
if ((powf(0.5, level) * diam) >= (theta_ * r) && level >= 0) {
//TODO: insert cell itself or children?
/// inserting cell itself
/*//check whether node is a domain list node
for (int i=0; i<*domainListIndex; i++) {
if ((bodyIndex + offset) == domainListIndices[i]) {
insert = false;
break;
//printf("domain list nodes do not need to be sent!\n");
}
}
if (insert) {
//add to indices to be sent
insertIndex = atomicAdd(domainListCounter, 1);
sendIndices[insertIndex] = bodyIndex + offset;
}
else {
}*/
/// inserting children
for (int i=0; i<8; i++) {
childIndex = child[8*(bodyIndex + offset) + i];
//check whether node is already within the indices to be sent
//check whether node is a domain list node
for (int i = 0; i < *domainListIndex; i++) {
if (childIndex == domainListIndices[i]) {
insert = false;
//printf("domain list nodes do not need to be sent!\n");
}
}
if (insert && childIndex != -1) {
//add to indices to be sent
insertIndex = atomicAdd(domainListCounter, 1);
sendIndices[insertIndex] = childIndex;
}
}
}
}
else {
//no particle to examine...
}
offset += stride;
}
}
//reset domainListCounter after compTheta!
__global__ void compThetaKernel(float *x, float *y, float *z, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int *domainListIndex, int *domainListCounter,
unsigned long *domainListKeys, int *domainListIndices, int *domainListLevels,
int *relevantDomainListIndices, SubDomainKeyTree *s, int curveType) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int bodyIndex = 0;
unsigned long key;
int domainIndex;
//"loop" over domain list nodes
while ((index + offset) < *domainListIndex) {
bodyIndex = domainListIndices[index + offset];
//calculate key
key = getParticleKeyPerParticle(x[bodyIndex], y[bodyIndex], z[bodyIndex], minX, maxX, minY, maxY,
minZ, maxZ, 21);
//if domain list node belongs to other process: add to relevant domain list indices
if (key2proc(key, s, curveType) != s->rank) {
domainIndex = atomicAdd(domainListCounter, 1);
relevantDomainListIndices[domainIndex] = bodyIndex;
//printf("relevant domain list index: %i\n", bodyIndex);
}
offset += stride;
}
}
// Kernel 6: updates the bodies/particles
__global__ void updateKernel(float *x, float *y, float *z, float *vx, float *vy, float *vz,
float *ax, float *ay, float *az, int n, float dt, float d) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while (bodyIndex + offset < n) {
// calculating/updating the velocities
vx[bodyIndex + offset] += dt * ax[bodyIndex + offset];
vy[bodyIndex + offset] += dt * ay[bodyIndex + offset];
vz[bodyIndex + offset] += dt * az[bodyIndex + offset];
// calculating/updating the positions
x[bodyIndex + offset] += d * dt * vx[bodyIndex + offset];
y[bodyIndex + offset] += d * dt * vy[bodyIndex + offset];
z[bodyIndex + offset] += d * dt * vz[bodyIndex + offset];
offset += stride;
}
}
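/* Illustrative host-side launch sketch (grid/block sizes and buffer names are assumptions,
   not taken from the original host code). The kernel performs a semi-implicit Euler step:
   v += dt * a, followed by x += d * dt * v.

   int blockSize = 256;
   int gridSize  = (numParticles + blockSize - 1) / blockSize;
   hipLaunchKernelGGL(updateKernel, dim3(gridSize), dim3(blockSize), 0, 0,
                      d_x, d_y, d_z, d_vx, d_vy, d_vz, d_ax, d_ay, d_az,
                      numParticles, dt, damping);
   hipDeviceSynchronize();
*/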
//TODO: only update/calculate COM for not domain list nodes?!
__global__ void insertReceivedParticlesKernel(float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int *to_delete_leaf, int *domainListIndices,
int *domainListIndex, int *lowestDomainListIndices, int *lowestDomainListIndex,
int n, int m) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
//note: -1 used as "null pointer"
//note: -2 used to lock a child (pointer)
int offset;
bool newBody = true;
float min_x;
float max_x;
float min_y;
float max_y;
float min_z;
float max_z;
int childPath;
int temp;
bool isDomainList = false;
offset = 0;
bodyIndex += to_delete_leaf[0];
//if ((bodyIndex + offset) % 10000 == 0) {
// printf("index = %i x = (%f, %f, %f)\n", bodyIndex + offset, x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset]);
//}
while ((bodyIndex + offset) < to_delete_leaf[1] && (bodyIndex + offset) > to_delete_leaf[0]) {
//if ((bodyIndex + offset) % 100 == 0) {
//if (mass[bodyIndex+offset] > 200.f) {
// printf("insert particle %i: x = (%f, %f, %f) m = %f\n", bodyIndex+offset, x[bodyIndex+offset],
// y[bodyIndex+offset], z[bodyIndex+offset], mass[bodyIndex+offset]);
//}
/*//debugging
if ((bodyIndex + offset) % 100 == 0) {
printf("index = %i x = (%f, %f, %f) (index = %i) to_delete_leaf = (%i, %i)\n", bodyIndex + offset, x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset], *index, to_delete_leaf[0], to_delete_leaf[1]);
//printf("index = %i x = (%f, %f, %f) (index = %i) to_delete_leaf = (%i, %i)\n", bodyIndex + offset - 10000, x[bodyIndex + offset - 10000], y[bodyIndex + offset-10000], z[bodyIndex + offset-10000], *index, to_delete_leaf[0], to_delete_leaf[1]);
}
for (int i=to_delete_leaf[0]; i<to_delete_leaf[1]; i++) {
if (i != (bodyIndex + offset)) {
if (x[i] == x[bodyIndex + offset]) {
//printf("ATTENTION: x[%i] = (%f, %f, %f) vs. x[%i] = (%f, %f, %f)\n", i, x[i], y[i], z[i],
// bodyIndex + offset, x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset]);
}
}
}
//end: debugging*/
//debugging
//offset += stride;
if (newBody) {
newBody = false;
isDomainList = false;
min_x = *minX;
max_x = *maxX;
min_y = *minY;
max_y = *maxY;
min_z = *minZ;
max_z = *maxZ;
temp = 0;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) { // x direction
childPath += 1;
max_x = 0.5 * (min_x + max_x);
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) { // y direction
childPath += 2;
max_y = 0.5 * (min_y + max_y);
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) { // z direction
childPath += 4;
max_z = 0.5 * (min_z + max_z);
}
else {
min_z = 0.5 * (min_z + max_z);
}
}
int childIndex = child[temp*8 + childPath];
// traverse tree until hitting leaf node
while (childIndex >= m /*&& childIndex < (8*m)*/) { //formerly n
isDomainList = false;
temp = childIndex;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) { // x direction
childPath += 1;
max_x = 0.5 * (min_x + max_x);
}
else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) { // y direction
childPath += 2;
max_y = 0.5 * (min_y + max_y);
}
else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) { // z direction
childPath += 4;
max_z = 0.5 * (min_z + max_z);
}
else {
min_z = 0.5 * (min_z + max_z);
}
for (int i=0; i<*domainListIndex; i++) {
if (temp == domainListIndices[i]) {
isDomainList = true;
break;
}
}
//TODO: !!!
if (/*true*/ !isDomainList) {
if (mass[bodyIndex + offset] != 0) {
atomicAdd(&x[temp], mass[bodyIndex + offset] * x[bodyIndex + offset]);
atomicAdd(&y[temp], mass[bodyIndex + offset] * y[bodyIndex + offset]);
atomicAdd(&z[temp], mass[bodyIndex + offset] * z[bodyIndex + offset]);
}
atomicAdd(&mass[temp], mass[bodyIndex + offset]);
//atomicAdd(&count[temp], 1); // do not count, since particles are just temporarily saved on this process
}
                atomicAdd(&count[temp], 1); // note: counted even though received particles are only stored temporarily on this process
childIndex = child[8*temp + childPath];
}
// if child is not locked
if (childIndex != -2) {
int locked = temp * 8 + childPath;
//lock
if (atomicCAS(&child[locked], childIndex, -2) == childIndex) {
// check whether a body is already stored at the location
if (childIndex == -1) {
//insert body and release lock
child[locked] = bodyIndex + offset;
}
else {
int patch = 8 * m; //8*n
while (childIndex >= 0 && childIndex < n) {
//debug
if (x[childIndex] == x[bodyIndex + offset]) {
printf("ATTENTION (shouldn't happen...): x[%i] = (%f, %f, %f) vs. x[%i] = (%f, %f, %f) | to_delete_leaf = (%i, %i)\n",
childIndex, x[childIndex], y[childIndex], z[childIndex], bodyIndex + offset, x[bodyIndex + offset],
y[bodyIndex + offset], z[bodyIndex + offset], to_delete_leaf[0], to_delete_leaf[1]);
}
//create a new cell (by atomically requesting the next unused array index)
int cell = atomicAdd(index, 1);
patch = min(patch, cell);
if (patch != cell) {
child[8 * temp + childPath] = cell;
}
// insert old/original particle
childPath = 0;
if (x[childIndex] < 0.5 * (min_x + max_x)) { childPath += 1; }
if (y[childIndex] < 0.5 * (min_y + max_y)) { childPath += 2; }
if (z[childIndex] < 0.5 * (min_z + max_z)) { childPath += 4; }
x[cell] += mass[childIndex] * x[childIndex];
y[cell] += mass[childIndex] * y[childIndex];
z[cell] += mass[childIndex] * z[childIndex];
mass[cell] += mass[childIndex];
                        // note: counted even though received particles are only stored temporarily on this process
                        count[cell] += count[childIndex];
child[8 * cell + childPath] = childIndex;
start[cell] = -1; //TODO: resetting start needed in insertReceivedParticles()?
// insert new particle
temp = cell;
childPath = 0;
// find insertion point for body
if (x[bodyIndex + offset] < 0.5 * (min_x + max_x)) {
childPath += 1;
max_x = 0.5 * (min_x + max_x);
} else {
min_x = 0.5 * (min_x + max_x);
}
if (y[bodyIndex + offset] < 0.5 * (min_y + max_y)) {
childPath += 2;
max_y = 0.5 * (min_y + max_y);
} else {
min_y = 0.5 * (min_y + max_y);
}
if (z[bodyIndex + offset] < 0.5 * (min_z + max_z)) {
childPath += 4;
max_z = 0.5 * (min_z + max_z);
} else {
min_z = 0.5 * (min_z + max_z);
}
// COM / preparing for calculation of COM
if (mass[bodyIndex + offset] != 0) {
x[cell] += mass[bodyIndex + offset] * x[bodyIndex + offset];
y[cell] += mass[bodyIndex + offset] * y[bodyIndex + offset];
z[cell] += mass[bodyIndex + offset] * z[bodyIndex + offset];
mass[cell] += mass[bodyIndex + offset];
}
                        // note: counted even though received particles are only stored temporarily on this process
                        count[cell] += count[bodyIndex + offset];
childIndex = child[8 * temp + childPath];
}
child[8 * temp + childPath] = bodyIndex + offset;
__threadfence(); // written to global memory arrays (child, x, y, mass) thus need to fence
child[locked] = patch;
}
offset += stride;
newBody = true;
}
else {
}
}
else {
}
__syncthreads();
}
}
__global__ void centreOfMassReceivedParticlesKernel(float *x, float *y, float *z, float *mass, int *startIndex, int *endIndex, int n)
{
int bodyIndex = threadIdx.x + blockIdx.x*blockDim.x;
int stride = blockDim.x*gridDim.x;
int offset = 0;
//note: most of it already done within buildTreeKernel
bodyIndex += *startIndex;
while ((bodyIndex + offset) < *endIndex) {
        if (mass[bodyIndex + offset] == 0) {
            printf("centreOfMassReceivedParticlesKernel: mass = 0 (%i)!\n", bodyIndex + offset);
        }
        if (mass[bodyIndex + offset] != 0) {
x[bodyIndex + offset] /= mass[bodyIndex + offset];
y[bodyIndex + offset] /= mass[bodyIndex + offset];
z[bodyIndex + offset] /= mass[bodyIndex + offset];
}
offset += stride;
}
}
// probably not needed, since tree is built (newly) for every iteration (step)
__global__ void repairTreeKernel(float *x, float *y, float *z, float *vx, float *vy, float *vz,
float *ax, float *ay, float *az, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, int *to_delete_cell, int *to_delete_leaf,
int *domainListIndices, int n, int m) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
//delete inserted leaves
while ((bodyIndex + offset) >= to_delete_leaf[0] && (bodyIndex + offset) < to_delete_leaf[1]) {
for (int i=0; i<8; i++) {
child[(bodyIndex + offset)*8 + i] = -1;
}
count[bodyIndex + offset] = 1;
x[bodyIndex + offset] = 0;
y[bodyIndex + offset] = 0;
z[bodyIndex + offset] = 0;
vx[bodyIndex + offset] = 0;
vy[bodyIndex + offset] = 0;
vz[bodyIndex + offset] = 0;
ax[bodyIndex + offset] = 0;
ay[bodyIndex + offset] = 0;
az[bodyIndex + offset] = 0;
mass[bodyIndex + offset] = 0;
start[bodyIndex + offset] = -1;
//sorted[bodyIndex + offset] = 0;
offset += stride;
}
offset = 0;
//delete inserted cells
while ((bodyIndex + offset) >= to_delete_cell[0] && (bodyIndex + offset) < to_delete_cell[1]) {
for (int i=0; i<8; i++) {
child[(bodyIndex + offset)*8 + i] = -1;
}
count[bodyIndex + offset] = 0;
x[bodyIndex + offset] = 0;
y[bodyIndex + offset] = 0;
z[bodyIndex + offset] = 0;
vx[bodyIndex + offset] = 0;
vy[bodyIndex + offset] = 0;
vz[bodyIndex + offset] = 0;
ax[bodyIndex + offset] = 0;
ay[bodyIndex + offset] = 0;
az[bodyIndex + offset] = 0;
mass[bodyIndex + offset] = 0;
start[bodyIndex + offset] = -1;
//sorted[bodyIndex + offset] = 0;
offset += stride;
}
}
__device__ int getTreeLevel(int index, int *child, float *x, float *y, float *z, float *minX, float *maxX, float *minY,
float *maxY, float *minZ, float *maxZ) {
unsigned long key = getParticleKeyPerParticle(x[index], y[index], z[index], minX, maxX, minY, maxY, minZ, maxZ, 21);
//int proc = key2proc(key, s);
int level = 0; //TODO: initialize level with 0 or 1 for getTreeLevel()?
int childIndex;
int path[21];
for (int i=0; i<21; i++) {
path[i] = (int) (key >> (21*3 - 3 * (i + 1)) & (int)7);
}
childIndex = 0;//child[path[0]];
//TODO: where to put level++ for getTreeLevel()?
for (int i=0; i<21; i++) {
//level++;
//childIndex = child[8*childIndex + path[i]];
if (childIndex == index) {
return level;
}
childIndex = child[8*childIndex + path[i]];
level++;
//childIndex = child[8*childIndex + path[i]];
//level++;
}
childIndex = 0; //child[path[0]];
printf("ATTENTION: level = -1 (index = %i x = (%f, %f, %f))\n", index, x[index], y[index], z[index]);
//printf("\tlevel = -1 childIndex = %i path[%i] = %i\n", childIndex, 0, path[0]);
/*for (int i=0; i<21; i++) {
childIndex = child[8*childIndex + path[i]];
printf("\tlevel = -1 childIndex = %i path[%i] = %i\n", childIndex, i, path[i]);
//for (int ii=0; ii<21; ii++) {
// printf("\t\t child[8*childIndex + %i] = %i\n", ii, child[8*childIndex + ii]);
//}
}*/
return -1;
}
// for debugging purposes
__global__ void findDuplicatesKernel(float *array, float *array_2, int length, SubDomainKeyTree *s, int *duplicateCounter) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < length) {
for (int i=0; i<length; i++) {
if (i != (bodyIndex + offset)) {
if (array[bodyIndex + offset] == array[i] && array_2[bodyIndex + offset] == array_2[i]) {
duplicateCounter[i] += 1;
printf("duplicate! (%i vs. %i) (x = %f, y = %f)\n", i, bodyIndex + offset, array[i], array_2[i]);
}
}
}
offset += stride;
}
}
// mark duplicates within an array (with -1)
__global__ void markDuplicatesKernel(int *indices, float *x, float *y, float *z,
float *mass, SubDomainKeyTree *s, int *counter, int length) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int maxIndex;
//remark: check only x, but in principle check all
while ((bodyIndex + offset) < length) {
if (indices[bodyIndex + offset] != -1) {
for (int i = 0; i < length; i++) {
if (i != (bodyIndex + offset)) {
if (indices[i] != -1 && (x[indices[bodyIndex + offset]] == x[indices[i]] || indices[bodyIndex + offset] == indices[i])) {
maxIndex = max(bodyIndex + offset, i);
// mark larger index with -1 (thus a duplicate)
indices[maxIndex] = -1;
atomicAdd(counter, 1);
}
}
}
}
//__syncthreads();
offset += stride;
}
}
// remove previously marked duplicates or rather copy non-duplicates into another array
__global__ void removeDuplicatesKernel(int *indices, int *removedDuplicatesIndices, int *counter, int length) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int indexToInsert;
while ((bodyIndex + offset) < length) {
if (indices[bodyIndex + offset] != -1) {
indexToInsert = atomicAdd(counter, 1);
removedDuplicatesIndices[indexToInsert] = indices[bodyIndex + offset];
}
offset += stride;
}
}
__global__ void getParticleCount(int *child, int *count, int *particleCount) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
while ((bodyIndex + offset) < 8) {
//particleCount += count[child[bodyIndex + offset]];
atomicAdd(particleCount, count[child[bodyIndex + offset]]);
offset += stride;
}
}
__global__ void createKeyHistRangesKernel(int bins, unsigned long *keyHistRanges) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long max_key = 1UL << 63;
while ((bodyIndex + offset) < bins) {
keyHistRanges[bodyIndex + offset] = (bodyIndex + offset) * (max_key/bins);
//printf("keyHistRanges[%i] = %lu\n", bodyIndex + offset, keyHistRanges[bodyIndex + offset]);
if ((bodyIndex + offset) == (bins - 1)) {
keyHistRanges[bins-1] = KEY_MAX;
}
offset += stride;
}
}
__global__ void keyHistCounterKernel(unsigned long *keyHistRanges, int *keyHistCounts, int bins, int n,
float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, SubDomainKeyTree *s, int curveType) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
unsigned long key;
while ((bodyIndex + offset) < n) {
key = getParticleKeyPerParticle(x[bodyIndex + offset], y[bodyIndex + offset], z[bodyIndex + offset],
minX, maxX, minY, maxY, minZ, maxZ, 21);
if (curveType == 0) {
            for (int i=0; i<(bins-1); i++) { // bins boundaries define bins-1 intervals; avoids reading keyHistRanges[bins]
if (key >= keyHistRanges[i] && key < keyHistRanges[i+1]) {
//keyHistCounts[i] += 1;
atomicAdd(&keyHistCounts[i], 1);
break;
}
}
}
else {
//TODO: Hilbert change
unsigned long hilbert = Lebesgue2Hilbert(key, 21);
            for (int i = 0; i < (bins-1); i++) { // bins boundaries define bins-1 intervals; avoids reading keyHistRanges[bins]
if (hilbert >= keyHistRanges[i] && hilbert < keyHistRanges[i + 1]) {
//keyHistCounts[i] += 1;
atomicAdd(&keyHistCounts[i], 1);
break;
}
}
}
offset += stride;
}
}
//TODO: rename index
__global__ void calculateNewRangeKernel(unsigned long *keyHistRanges, int *keyHistCounts, int bins, int n,
float *x, float *y, float *z, float *mass, int *count, int *start,
int *child, int *index, float *minX, float *maxX, float *minY, float *maxY,
float *minZ, float *maxZ, SubDomainKeyTree *s) {
int bodyIndex = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int sum;
unsigned long newRange;
while ((bodyIndex + offset) < (bins-1)) {
sum = 0;
for (int i=0; i<(bodyIndex+offset); i++) {
sum += keyHistCounts[i];
}
for (int i=1; i<s->numProcesses; i++) {
if ((sum + keyHistCounts[bodyIndex + offset]) >= (i*n) && sum < (i*n)) {
printf("[rank %i] new range: %lu\n", s->rank, keyHistRanges[bodyIndex + offset]);
s->range[i] = keyHistRanges[bodyIndex + offset];
}
}
//printf("[rank %i] keyHistCounts[%i] = %i\n", s->rank, bodyIndex+offset, keyHistCounts[bodyIndex+offset]);
atomicAdd(index, keyHistCounts[bodyIndex+offset]);
offset += stride;
}
} |
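/* Illustrative host-side sketch of how the three histogram kernels above could be combined to
   derive new ranges for load balancing (launch configuration, buffer names and the MPI reduction
   are assumptions, not the original host code):

   hipLaunchKernelGGL(createKeyHistRangesKernel, dim3(gridSize), dim3(blockSize), 0, 0,
                      bins, d_keyHistRanges);
   hipMemset(d_keyHistCounts, 0, bins * sizeof(int));
   hipLaunchKernelGGL(keyHistCounterKernel, dim3(gridSize), dim3(blockSize), 0, 0,
                      d_keyHistRanges, d_keyHistCounts, bins, numParticlesLocal,
                      d_x, d_y, d_z, d_mass, d_count, d_start, d_child, d_index,
                      d_minX, d_maxX, d_minY, d_maxY, d_minZ, d_maxZ, d_subDomainKeyTree, curveType);
   // sum the per-process histograms on the host (e.g. via MPI_Allreduce on a host copy of
   // keyHistCounts), copy the global histogram back to the device, then derive the new ranges:
   hipLaunchKernelGGL(calculateNewRangeKernel, dim3(gridSize), dim3(blockSize), 0, 0,
                      d_keyHistRanges, d_keyHistCounts, bins, numParticlesGlobal,
                      d_x, d_y, d_z, d_mass, d_count, d_start, d_child, d_index,
                      d_minX, d_maxX, d_minY, d_maxY, d_minZ, d_maxZ, d_subDomainKeyTree);
*/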
4b90dd6dfe8f4ded4313afef7b295df6bec2220b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <chrono>
#include <cassert>
#include <vector>
#include "lodepng.h"
hipError_t status;
#define CudaCall(x) \
    do { \
        status = x; \
        if (status != hipSuccess) \
            std::cout << "Error [" << status << "]: " << hipGetErrorString(status) << " (" << __FILE__ << ":" << __LINE__ << ")" << std::endl; \
    } while (0)
// GPU Kernel functions
#pragma region gpuCode
__global__
void ScaleAndGray(unsigned char* orig, unsigned* gray, unsigned width, unsigned height, int scaleFactor) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= height || j >= width)
return;
int newWidth = width / scaleFactor;
int x = (scaleFactor * i - 1 * (i > 0));
int y = (scaleFactor * j - 1 * (j > 0));
gray[i * newWidth + j] =
0.3 * orig[x * (4 * width) + 4 * y] +
0.59 * orig[x * (4 * width) + 4 * y + 1] +
0.11 * orig[x * (4 * width) + 4 * y + 2];
}
__global__
void Zncc(unsigned* leftPixels, unsigned* rightPixels, unsigned* disparityMap, unsigned width, unsigned height,
int minDisp, int maxDisp, int windowWidth, int windowHeight) {
unsigned windowSize = windowWidth * windowHeight;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= height || j >= width)
return;
float bestDisparity = maxDisp;
float bestZncc = -1;
// Select the best disparity value for the current pixel
for (int d = minDisp; d <= maxDisp; d++) {
// Calculating mean of blocks using the sliding window method
float meanLBlock = 0, meanRBlock = 0;
for (int x = -windowHeight / 2; x < windowHeight / 2; x++) {
for (int y = -windowWidth / 2; y < windowWidth / 2; y++) {
// Check for image borders
if (
!(i + x >= 0) ||
!(i + x < height) ||
!(j + y >= 0) ||
!(j + y < width) ||
!(j + y - d >= 0) ||
!(j + y - d < width)
) {
continue;
}
meanLBlock += leftPixels[(i + x) * width + (j + y)];
meanRBlock += rightPixels[(i + x) * width + (j + y - d)];
}
}
meanLBlock /= windowSize;
meanRBlock /= windowSize;
// Calculate ZNCC for current disparity value
float stdLBlock = 0, stdRBlock = 0;
float currentZncc = 0;
for (int x = -windowHeight / 2; x < windowHeight / 2; x++) {
for (int y = -windowWidth / 2; y < windowWidth / 2; y++) {
// Check for image borders
if (
!(i + x >= 0) ||
!(i + x < height) ||
!(j + y >= 0) ||
!(j + y < width) ||
!(j + y - d >= 0) ||
!(j + y - d < width)
) {
continue;
}
int centerL = leftPixels[(i + x) * width + (j + y)] - meanLBlock;
int centerR = rightPixels[(i + x) * width + (j + y - d)] - meanRBlock;
// standard deviation
stdLBlock += centerL * centerL;
stdRBlock += centerR * centerR;
currentZncc += centerL * centerR;
}
}
currentZncc /= sqrtf(stdLBlock) * sqrtf(stdRBlock);
// Selecting best disparity
if (currentZncc > bestZncc) {
bestZncc = currentZncc;
bestDisparity = d;
}
}
disparityMap[i * width + j] = (unsigned)fabs(bestDisparity);
}
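// Keeps a disparity only when the left and right maps agree within crossCheckingThreshold; otherwise the pixel is marked occluded (0).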
__global__
void CrossCheck(unsigned* leftDisp, unsigned* rightDisp, unsigned* result, unsigned imSize, int crossCheckingThreshold) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= imSize)
return;
int diff = leftDisp[i] - rightDisp[i];
if (diff >= 0) { // leftDisp is winner
if (diff <= crossCheckingThreshold) {
result[i] = leftDisp[i];
} else {
result[i] = 0;
}
} else { // rightDisp is winner
if (-diff <= crossCheckingThreshold) {
result[i] = rightDisp[i];
} else {
result[i] = 0;
}
}
}
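// Fills occluded (zero) pixels with the value of the nearest non-zero neighbour inside a growing search window.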
__global__
void OcclusionFill(unsigned* map, unsigned* result, unsigned width, unsigned height, int occlusionNeighbours) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= height || j >= width)
return;
unsigned currentIndex = i * width + j;
result[currentIndex] = map[currentIndex];
// If the pixel value is 0, copy value from nearest non zero neighbour
if (map[currentIndex] == 0) {
bool stop = false;
for (int n = 1; n <= occlusionNeighbours / 2 && !stop; n++) {
for (int y = -n; y <= n && !stop; y++) {
for (int x = -n; x <= n && !stop; x++) {
// Checking for borders
if (
!(i + x >= 0) ||
!(i + x < height) ||
!(j + y >= 0) ||
!(j + y < width) ||
(x == 0 && y == 0)
) {
continue;
}
int index = (i + x) * width + (j + y);
if (map[index] != 0) {
result[currentIndex] = map[index];
stop = true;
break;
}
}
}
}
}
}
#pragma endregion gpuCode
/*
Class to calculate time taken by functions in seconds.
* Creating an object of the class in a function calls the constructor, which starts the timer.
* At the end of the function, the destructor is called, which stops the timer and calculates the duration.
* We can get the duration manually using the getElapsedTime method.
*/
class Timer {
private:
std::chrono::time_point<std::chrono::high_resolution_clock> m_Start, m_End;
std::chrono::duration<float> m_Duration;
public:
Timer() {
m_Start = std::chrono::high_resolution_clock::now();
}
~Timer() {
m_End = std::chrono::high_resolution_clock::now();
m_Duration = m_End - m_Start;
std::cout << "Done (" << m_Duration.count() << " s)" << std::endl;
}
float getElapsedTime() {
m_End = std::chrono::high_resolution_clock::now();
m_Duration = m_End - m_Start;
return m_Duration.count();
}
};
constexpr int maxDisparity = 64;
constexpr int windowWidth = 15;
constexpr int windowHeight = 15;
constexpr int crossCheckingThreshold = 2;
constexpr int occlusionNeighbours = 256;
constexpr int scaleFactor = 4;
std::vector<unsigned char> loadImage(const char*, unsigned&, unsigned&);
std::vector<unsigned char> normalize(std::vector<unsigned>, const unsigned, const unsigned);
// Display GPU info
// https://stackoverflow.com/a/5689133
void DisplayHeader() {
const int kb = 1024;
const int mb = kb * kb;
std::cout << "NBody.GPU" << std::endl << "=========" << std::endl << std::endl;
std::cout << "CUDA version: v" << CUDART_VERSION << std::endl;
int devCount;
hipGetDeviceCount(&devCount);
std::cout << "CUDA Devices: " << std::endl << std::endl;
for (int i = 0; i < devCount; ++i) {
hipDeviceProp_t props;
hipGetDeviceProperties(&props, i);
std::cout << i << ": " << props.name << ": " << props.major << "." << props.minor << std::endl;
std::cout << " Global memory: " << props.totalGlobalMem / mb << "mb" << std::endl;
std::cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" << std::endl;
std::cout << " Constant memory: " << props.totalConstMem / kb << "kb" << std::endl;
std::cout << " Block registers: " << props.regsPerBlock << std::endl << std::endl;
std::cout << " Warp size: " << props.warpSize << std::endl;
std::cout << " Threads per block: " << props.maxThreadsPerBlock << std::endl;
std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << std::endl;
std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << std::endl;
std::cout << std::endl;
}
}
int main() {
Timer timer;
DisplayHeader();
// Host variables
std::vector<unsigned char> leftPixels, rightPixels;
unsigned width, height, rightWidth, rightHeight;
std::cout << "Reading Left Image...";
leftPixels = loadImage("imageL.png", width, height);
std::cout << "Reading Right Image...";
rightPixels = loadImage("imageR.png", rightWidth, rightHeight);
// left and right images are assumed to have the same dimensions
assert(width == rightWidth && height == rightHeight);
width /= scaleFactor;
height /= scaleFactor;
unsigned imSize = width * height;
unsigned origSize = rightWidth * rightHeight;
std::vector<unsigned> output(imSize);
// Device variables
unsigned char *d_origL, *d_origR;
unsigned *d_grayL, *d_grayR, *d_dispLR, *d_dispRL, *d_dispCC, *d_output;
CudaCall(hipMalloc((void**) &d_origL, sizeof(unsigned char) * origSize * 4));
CudaCall(hipMalloc((void**) &d_origR, sizeof(unsigned char) * origSize * 4));
CudaCall(hipMalloc((void**) &d_grayL, sizeof(unsigned) * imSize));
CudaCall(hipMalloc((void**) &d_grayR, sizeof(unsigned) * imSize));
CudaCall(hipMalloc((void**) &d_dispLR, sizeof(unsigned) * imSize));
CudaCall(hipMalloc((void**) &d_dispRL, sizeof(unsigned) * imSize));
CudaCall(hipMalloc((void**) &d_dispCC, sizeof(unsigned) * imSize));
CudaCall(hipMalloc((void**) &d_output, sizeof(unsigned) * imSize));
// Copy Data from host to device
CudaCall(hipMemcpy(d_origL, leftPixels.data(), sizeof(leftPixels[0]) * leftPixels.size(), hipMemcpyHostToDevice));
CudaCall(hipMemcpy(d_origR, rightPixels.data(), sizeof(rightPixels[0]) * rightPixels.size(), hipMemcpyHostToDevice));
// Profiling
float elapsed = 0;
hipEvent_t start, stop;
CudaCall(hipEventCreate(&start));
CudaCall(hipEventCreate(&stop));
// Kernel Calls
dim3 blocks(height / 21, width / 21);
dim3 threads(21, 21);
dim3 blocks1D((height / 21) * (width / 21));
dim3 threads1D(21 * 21);
// Scale and Gray left
std::cout << "Converting Left Image to grayscale...";
CudaCall(hipEventRecord(start));
hipLaunchKernelGGL(( ScaleAndGray), dim3(blocks), dim3(threads), 0, 0, d_origL, d_grayL, rightWidth, rightHeight, scaleFactor);
CudaCall(hipEventRecord(stop));
CudaCall(hipEventSynchronize(stop));
CudaCall(hipEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(hipPeekAtLastError());
CudaCall(hipDeviceSynchronize());
// Scale and Gray right
std::cout << "Converting Right Image to grayscale...";
CudaCall(hipEventRecord(start));
hipLaunchKernelGGL(( ScaleAndGray), dim3(blocks), dim3(threads), 0, 0, d_origR, d_grayR, rightWidth, rightHeight, scaleFactor);
CudaCall(hipEventRecord(stop));
CudaCall(hipEventSynchronize(stop));
CudaCall(hipEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(hipPeekAtLastError());
CudaCall(hipDeviceSynchronize());
// Disparity Left over Right
std::cout << "Converting Left Disparity Map...";
CudaCall(hipEventRecord(start));
hipLaunchKernelGGL(( Zncc), dim3(blocks), dim3(threads), 0, 0, d_grayL, d_grayR, d_dispLR, width, height, 0, maxDisparity, windowWidth, windowHeight);
CudaCall(hipEventRecord(stop));
CudaCall(hipEventSynchronize(stop));
CudaCall(hipEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(hipPeekAtLastError());
CudaCall(hipDeviceSynchronize());
// Disparity Right over Left
std::cout << "Converting Right Disparity Map...";
CudaCall(hipEventRecord(start));
hipLaunchKernelGGL(( Zncc), dim3(blocks), dim3(threads), 0, 0, d_grayR, d_grayL, d_dispRL, width, height, -maxDisparity, 0, windowWidth, windowHeight);
CudaCall(hipEventRecord(stop));
CudaCall(hipEventSynchronize(stop));
CudaCall(hipEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(hipPeekAtLastError());
CudaCall(hipDeviceSynchronize());
// Cross Checking
std::cout << "Performing Cross Checking...";
CudaCall(hipEventRecord(start));
hipLaunchKernelGGL(( CrossCheck), dim3(blocks1D), dim3(threads1D), 0, 0, d_dispLR, d_dispRL, d_dispCC, imSize, crossCheckingThreshold);
CudaCall(hipEventRecord(stop));
CudaCall(hipEventSynchronize(stop));
CudaCall(hipEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(hipPeekAtLastError());
CudaCall(hipDeviceSynchronize());
// Occlusion Filling
std::cout << "Performing Occlusion Filling...";
CudaCall(hipEventRecord(start));
hipLaunchKernelGGL(( OcclusionFill), dim3(blocks), dim3(threads), 0, 0, d_dispCC, d_output, width, height, occlusionNeighbours);
CudaCall(hipEventRecord(stop));
CudaCall(hipEventSynchronize(stop));
CudaCall(hipEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(hipPeekAtLastError());
CudaCall(hipDeviceSynchronize());
// Copy data from device to host
CudaCall(hipMemcpy(&output[0], d_output, sizeof(unsigned) * imSize, hipMemcpyDeviceToHost));
lodepng::encode("output.png", normalize(output, width, height), width, height);
std::cout << "The program took " << timer.getElapsedTime() << " s" << std::endl;
hipFree(d_origL);
hipFree(d_origR);
hipFree(d_grayL);
hipFree(d_grayR);
hipFree(d_dispLR);
hipFree(d_dispRL);
hipFree(d_dispCC);
hipFree(d_output);
std::cin.get();
return 0;
}
std::vector<unsigned char> loadImage(const char* filename, unsigned& width, unsigned& height) {
Timer timer;
std::vector<unsigned char> pixels;
unsigned error = lodepng::decode(pixels, width, height, filename);
if (error) {
std::cout << "Failed to load image: " << lodepng_error_text(error) << std::endl;
std::cin.get();
exit(-1);
}
return pixels;
}
std::vector<unsigned char> normalize(
std::vector<unsigned> in,
const unsigned width,
const unsigned height
) {
std::vector<unsigned char> result(width * height * 4);
unsigned char max = 0;
unsigned char min = UCHAR_MAX;
for (int i = 0; i < width * height; i++) {
if (in[i] > max) {
max = in[i];
}
if (in[i] < min) {
min = in[i];
}
}
// Normalize values to be between 0 and 255
int mapIndex = 0;
for (int i = 0; i < width * height * 4; i += 4, mapIndex++) {
result[i] = result[i + 1] = result[i + 2] = (unsigned char)(255 * (in[mapIndex] - min) / (max - min));
result[i + 3] = 255;
}
return result;
}
| 4b90dd6dfe8f4ded4313afef7b295df6bec2220b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <chrono>
#include <cassert>
#include <vector>
#include <climits> // UCHAR_MAX, used in normalize()
#include "lodepng.h"
cudaError_t status;
#define CudaCall(x) \
status = x; \
if (status != cudaSuccess) \
std::cout << "Error [" << status << "]: " << cudaGetErrorString(status) << " (" << __FILE__ << ":" << __LINE__ << ")" << std::endl
// GPU Kernel functions
#pragma region gpuCode
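// Downsamples the RGBA input by scaleFactor and converts it to grayscale with the 0.3/0.59/0.11 luminance weights.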
__global__
void ScaleAndGray(unsigned char* orig, unsigned* gray, unsigned width, unsigned height, int scaleFactor) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= height || j >= width)
return;
int newWidth = width / scaleFactor;
int x = (scaleFactor * i - 1 * (i > 0));
int y = (scaleFactor * j - 1 * (j > 0));
gray[i * newWidth + j] =
0.3 * orig[x * (4 * width) + 4 * y] +
0.59 * orig[x * (4 * width) + 4 * y + 1] +
0.11 * orig[x * (4 * width) + 4 * y + 2];
}
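// Computes a disparity map by ZNCC block matching: for each pixel, every disparity in [minDisp, maxDisp] is tested and the one with the highest zero-mean normalized cross-correlation is kept.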
__global__
void Zncc(unsigned* leftPixels, unsigned* rightPixels, unsigned* disparityMap, unsigned width, unsigned height,
int minDisp, int maxDisp, int windowWidth, int windowHeight) {
unsigned windowSize = windowWidth * windowHeight;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= height || j >= width)
return;
float bestDisparity = maxDisp;
float bestZncc = -1;
// Select the best disparity value for the current pixel
for (int d = minDisp; d <= maxDisp; d++) {
// Calculating mean of blocks using the sliding window method
float meanLBlock = 0, meanRBlock = 0;
for (int x = -windowHeight / 2; x < windowHeight / 2; x++) {
for (int y = -windowWidth / 2; y < windowWidth / 2; y++) {
// Check for image borders
if (
!(i + x >= 0) ||
!(i + x < height) ||
!(j + y >= 0) ||
!(j + y < width) ||
!(j + y - d >= 0) ||
!(j + y - d < width)
) {
continue;
}
meanLBlock += leftPixels[(i + x) * width + (j + y)];
meanRBlock += rightPixels[(i + x) * width + (j + y - d)];
}
}
meanLBlock /= windowSize;
meanRBlock /= windowSize;
// Calculate ZNCC for current disparity value
float stdLBlock = 0, stdRBlock = 0;
float currentZncc = 0;
for (int x = -windowHeight / 2; x < windowHeight / 2; x++) {
for (int y = -windowWidth / 2; y < windowWidth / 2; y++) {
// Check for image borders
if (
!(i + x >= 0) ||
!(i + x < height) ||
!(j + y >= 0) ||
!(j + y < width) ||
!(j + y - d >= 0) ||
!(j + y - d < width)
) {
continue;
}
int centerL = leftPixels[(i + x) * width + (j + y)] - meanLBlock;
int centerR = rightPixels[(i + x) * width + (j + y - d)] - meanRBlock;
// standard deviation
stdLBlock += centerL * centerL;
stdRBlock += centerR * centerR;
currentZncc += centerL * centerR;
}
}
currentZncc /= sqrtf(stdLBlock) * sqrtf(stdRBlock);
// Selecting best disparity
if (currentZncc > bestZncc) {
bestZncc = currentZncc;
bestDisparity = d;
}
}
disparityMap[i * width + j] = (unsigned)fabs(bestDisparity);
}
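// Keeps a disparity only when the left and right maps agree within crossCheckingThreshold; otherwise the pixel is marked occluded (0).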
__global__
void CrossCheck(unsigned* leftDisp, unsigned* rightDisp, unsigned* result, unsigned imSize, int crossCheckingThreshold) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= imSize)
return;
int diff = leftDisp[i] - rightDisp[i];
if (diff >= 0) { // leftDisp is winner
if (diff <= crossCheckingThreshold) {
result[i] = leftDisp[i];
} else {
result[i] = 0;
}
} else { // rightDisp is winner
if (-diff <= crossCheckingThreshold) {
result[i] = rightDisp[i];
} else {
result[i] = 0;
}
}
}
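// Fills occluded (zero) pixels with the value of the nearest non-zero neighbour inside a growing search window.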
__global__
void OcclusionFill(unsigned* map, unsigned* result, unsigned width, unsigned height, int occlusionNeighbours) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= height || j >= width)
return;
unsigned currentIndex = i * width + j;
result[currentIndex] = map[currentIndex];
// If the pixel value is 0, copy value from nearest non zero neighbour
if (map[currentIndex] == 0) {
bool stop = false;
for (int n = 1; n <= occlusionNeighbours / 2 && !stop; n++) {
for (int y = -n; y <= n && !stop; y++) {
for (int x = -n; x <= n && !stop; x++) {
// Checking for borders
if (
!(i + x >= 0) ||
!(i + x < height) ||
!(j + y >= 0) ||
!(j + y < width) ||
(x == 0 && y == 0)
) {
continue;
}
int index = (i + x) * width + (j + y);
if (map[index] != 0) {
result[currentIndex] = map[index];
stop = true;
break;
}
}
}
}
}
}
#pragma endregion gpuCode
/*
Class to calculate time taken by functions in seconds.
* Creating an object of the class in a function calls the constructor, which starts the timer.
* At the end of the function, the destructor is called, which stops the timer and calculates the duration.
* We can get the duration manually using the getElapsedTime method.
*/
class Timer {
private:
std::chrono::time_point<std::chrono::high_resolution_clock> m_Start, m_End;
std::chrono::duration<float> m_Duration;
public:
Timer() {
m_Start = std::chrono::high_resolution_clock::now();
}
~Timer() {
m_End = std::chrono::high_resolution_clock::now();
m_Duration = m_End - m_Start;
std::cout << "Done (" << m_Duration.count() << " s)" << std::endl;
}
float getElapsedTime() {
m_End = std::chrono::high_resolution_clock::now();
m_Duration = m_End - m_Start;
return m_Duration.count();
}
};
constexpr int maxDisparity = 64;
constexpr int windowWidth = 15;
constexpr int windowHeight = 15;
constexpr int crossCheckingThreshold = 2;
constexpr int occlusionNeighbours = 256;
constexpr int scaleFactor = 4;
std::vector<unsigned char> loadImage(const char*, unsigned&, unsigned&);
std::vector<unsigned char> normalize(std::vector<unsigned>, const unsigned, const unsigned);
// Display GPU info
// https://stackoverflow.com/a/5689133
void DisplayHeader() {
const int kb = 1024;
const int mb = kb * kb;
std::cout << "NBody.GPU" << std::endl << "=========" << std::endl << std::endl;
std::cout << "CUDA version: v" << CUDART_VERSION << std::endl;
int devCount;
cudaGetDeviceCount(&devCount);
std::cout << "CUDA Devices: " << std::endl << std::endl;
for (int i = 0; i < devCount; ++i) {
cudaDeviceProp props;
cudaGetDeviceProperties(&props, i);
std::cout << i << ": " << props.name << ": " << props.major << "." << props.minor << std::endl;
std::cout << " Global memory: " << props.totalGlobalMem / mb << "mb" << std::endl;
std::cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" << std::endl;
std::cout << " Constant memory: " << props.totalConstMem / kb << "kb" << std::endl;
std::cout << " Block registers: " << props.regsPerBlock << std::endl << std::endl;
std::cout << " Warp size: " << props.warpSize << std::endl;
std::cout << " Threads per block: " << props.maxThreadsPerBlock << std::endl;
std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << " ]" << std::endl;
std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" << std::endl;
std::cout << std::endl;
}
}
int main() {
Timer timer;
DisplayHeader();
// Host variables
std::vector<unsigned char> leftPixels, rightPixels;
unsigned width, height, rightWidth, rightHeight;
std::cout << "Reading Left Image...";
leftPixels = loadImage("imageL.png", width, height);
std::cout << "Reading Right Image...";
rightPixels = loadImage("imageR.png", rightWidth, rightHeight);
// left and right images are assumed to have the same dimensions
assert(width == rightWidth && height == rightHeight);
width /= scaleFactor;
height /= scaleFactor;
unsigned imSize = width * height;
unsigned origSize = rightWidth * rightHeight;
std::vector<unsigned> output(imSize);
// Device variables
unsigned char *d_origL, *d_origR;
unsigned *d_grayL, *d_grayR, *d_dispLR, *d_dispRL, *d_dispCC, *d_output;
CudaCall(cudaMalloc((void**) &d_origL, sizeof(unsigned char) * origSize * 4));
CudaCall(cudaMalloc((void**) &d_origR, sizeof(unsigned char) * origSize * 4));
CudaCall(cudaMalloc((void**) &d_grayL, sizeof(unsigned) * imSize));
CudaCall(cudaMalloc((void**) &d_grayR, sizeof(unsigned) * imSize));
CudaCall(cudaMalloc((void**) &d_dispLR, sizeof(unsigned) * imSize));
CudaCall(cudaMalloc((void**) &d_dispRL, sizeof(unsigned) * imSize));
CudaCall(cudaMalloc((void**) &d_dispCC, sizeof(unsigned) * imSize));
CudaCall(cudaMalloc((void**) &d_output, sizeof(unsigned) * imSize));
// Copy Data from host to device
CudaCall(cudaMemcpy(d_origL, leftPixels.data(), sizeof(leftPixels[0]) * leftPixels.size(), cudaMemcpyHostToDevice));
CudaCall(cudaMemcpy(d_origR, rightPixels.data(), sizeof(rightPixels[0]) * rightPixels.size(), cudaMemcpyHostToDevice));
// Profiling
float elapsed = 0;
cudaEvent_t start, stop;
CudaCall(cudaEventCreate(&start));
CudaCall(cudaEventCreate(&stop));
// Kernel Calls
dim3 blocks(height / 21, width / 21);
dim3 threads(21, 21);
dim3 blocks1D((height / 21) * (width / 21));
dim3 threads1D(21 * 21);
// Scale and Gray left
std::cout << "Converting Left Image to grayscale...";
CudaCall(cudaEventRecord(start));
ScaleAndGray<<<blocks, threads>>>(d_origL, d_grayL, rightWidth, rightHeight, scaleFactor);
CudaCall(cudaEventRecord(stop));
CudaCall(cudaEventSynchronize(stop));
CudaCall(cudaEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(cudaPeekAtLastError());
CudaCall(cudaDeviceSynchronize());
// Scale and Gray right
std::cout << "Converting Right Image to grayscale...";
CudaCall(cudaEventRecord(start));
ScaleAndGray<<<blocks, threads>>>(d_origR, d_grayR, rightWidth, rightHeight, scaleFactor);
CudaCall(cudaEventRecord(stop));
CudaCall(cudaEventSynchronize(stop));
CudaCall(cudaEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(cudaPeekAtLastError());
CudaCall(cudaDeviceSynchronize());
// Disparity Left over Right
std::cout << "Converting Left Disparity Map...";
CudaCall(cudaEventRecord(start));
Zncc<<<blocks, threads>>>(d_grayL, d_grayR, d_dispLR, width, height, 0, maxDisparity, windowWidth, windowHeight);
CudaCall(cudaEventRecord(stop));
CudaCall(cudaEventSynchronize(stop));
CudaCall(cudaEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(cudaPeekAtLastError());
CudaCall(cudaDeviceSynchronize());
// Disparity Right over Left
std::cout << "Converting Right Disparity Map...";
CudaCall(cudaEventRecord(start));
Zncc<<<blocks, threads>>>(d_grayR, d_grayL, d_dispRL, width, height, -maxDisparity, 0, windowWidth, windowHeight);
CudaCall(cudaEventRecord(stop));
CudaCall(cudaEventSynchronize(stop));
CudaCall(cudaEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(cudaPeekAtLastError());
CudaCall(cudaDeviceSynchronize());
// Cross Checking
std::cout << "Performing Cross Checking...";
CudaCall(cudaEventRecord(start));
CrossCheck<<<blocks1D, threads1D>>>(d_dispLR, d_dispRL, d_dispCC, imSize, crossCheckingThreshold);
CudaCall(cudaEventRecord(stop));
CudaCall(cudaEventSynchronize(stop));
CudaCall(cudaEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(cudaPeekAtLastError());
CudaCall(cudaDeviceSynchronize());
// Occlusion Filling
std::cout << "Performing Occlusion Filling...";
CudaCall(cudaEventRecord(start));
OcclusionFill<<<blocks, threads>>>(d_dispCC, d_output, width, height, occlusionNeighbours);
CudaCall(cudaEventRecord(stop));
CudaCall(cudaEventSynchronize(stop));
CudaCall(cudaEventElapsedTime(&elapsed, start, stop));
std::cout << "Done (" << elapsed / 1000 << " s)" << std::endl;
CudaCall(cudaPeekAtLastError());
CudaCall(cudaDeviceSynchronize());
// Copy data from device to host
CudaCall(cudaMemcpy(&output[0], d_output, sizeof(unsigned) * imSize, cudaMemcpyDeviceToHost));
lodepng::encode("output.png", normalize(output, width, height), width, height);
std::cout << "The program took " << timer.getElapsedTime() << " s" << std::endl;
cudaFree(d_origL);
cudaFree(d_origR);
cudaFree(d_grayL);
cudaFree(d_grayR);
cudaFree(d_dispLR);
cudaFree(d_dispRL);
cudaFree(d_dispCC);
cudaFree(d_output);
std::cin.get();
return 0;
}
std::vector<unsigned char> loadImage(const char* filename, unsigned& width, unsigned& height) {
Timer timer;
std::vector<unsigned char> pixels;
unsigned error = lodepng::decode(pixels, width, height, filename);
if (error) {
std::cout << "Failed to load image: " << lodepng_error_text(error) << std::endl;
std::cin.get();
exit(-1);
}
return pixels;
}
std::vector<unsigned char> normalize(
std::vector<unsigned> in,
const unsigned width,
const unsigned height
) {
std::vector<unsigned char> result(width * height * 4);
unsigned char max = 0;
unsigned char min = UCHAR_MAX;
for (int i = 0; i < width * height; i++) {
if (in[i] > max) {
max = in[i];
}
if (in[i] < min) {
min = in[i];
}
}
// Normalize values to be between 0 and 255
int mapIndex = 0;
for (int i = 0; i < width * height * 4; i += 4, mapIndex++) {
result[i] = result[i + 1] = result[i + 2] = (unsigned char)(255 * (in[mapIndex] - min) / (max - min));
result[i + 3] = 255;
}
return result;
}
|
2f29c0a292c92bc7027d7bdacb1a058636068b75.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computeGradientCentralDiff.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *similarities = NULL;
hipMalloc(&similarities, XSIZE*YSIZE);
float *gradient = NULL;
hipMalloc(&gradient, XSIZE*YSIZE);
int *activeMask = NULL;
hipMalloc(&activeMask, XSIZE*YSIZE);
int activeSlices = 1;
int slices = 1;
int p = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((computeGradientCentralDiff), dim3(gridBlock), dim3(threadBlock), 0, 0, similarities, gradient, activeMask, activeSlices, slices, p);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((computeGradientCentralDiff), dim3(gridBlock), dim3(threadBlock), 0, 0, similarities, gradient, activeMask, activeSlices, slices, p);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((computeGradientCentralDiff), dim3(gridBlock), dim3(threadBlock), 0, 0, similarities, gradient, activeMask, activeSlices, slices, p);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2f29c0a292c92bc7027d7bdacb1a058636068b75.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computeGradientCentralDiff.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *similarities = NULL;
cudaMalloc(&similarities, XSIZE*YSIZE);
float *gradient = NULL;
cudaMalloc(&gradient, XSIZE*YSIZE);
int *activeMask = NULL;
cudaMalloc(&activeMask, XSIZE*YSIZE);
int activeSlices = 1;
int slices = 1;
int p = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
computeGradientCentralDiff<<<gridBlock,threadBlock>>>(similarities,gradient,activeMask,activeSlices,slices,p);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
computeGradientCentralDiff<<<gridBlock,threadBlock>>>(similarities,gradient,activeMask,activeSlices,slices,p);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
computeGradientCentralDiff<<<gridBlock,threadBlock>>>(similarities,gradient,activeMask,activeSlices,slices,p);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
32c0070d6068a82e890bd1ed7325491e89765fcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "book.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include<stdio.h>
int main(void){
hipDeviceProp_t prop;
int count;
HANDLE_ERROR(hipGetDeviceCount(&count));
for(int i=0; i<count; i++){
HANDLE_ERROR(hipGetDeviceProperties(&prop, i));
printf("Name: %s \n", prop.name);
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
| 32c0070d6068a82e890bd1ed7325491e89765fcb.cu | #include "book.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include<stdio.h>
int main(void){
cudaDeviceProp prop;
int count;
HANDLE_ERROR(cudaGetDeviceCount(&count));
for(int i=0; i<count; i++){
HANDLE_ERROR(cudaGetDeviceProperties(&prop, i));
printf("Name: %s \n", prop.name);
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( "Kernel execition timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
|
e5071f8725345ed178fc69a335096d16939aac6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "collision.h"
__managed__ BodyType **d_bodies;
// Helper variables for rendering and checksum computation.
__device__ int r_draw_counter = 0;
__device__ float r_Body_pos_x[kNumBodies];
__device__ float r_Body_pos_y[kNumBodies];
__device__ float r_Body_vel_x[kNumBodies];
__device__ float r_Body_vel_y[kNumBodies];
__device__ float r_Body_mass[kNumBodies];
int host_draw_counter;
float host_Body_pos_x[kNumBodies];
float host_Body_pos_y[kNumBodies];
float host_Body_vel_x[kNumBodies];
float host_Body_vel_y[kNumBodies];
float host_Body_mass[kNumBodies];
float host_Body_is_active[kNumBodies];
__device__ void Body_apply_force(IndexT id, IndexT other) {
// Update `other`.
if (other != id) {
float dist;
float F;
CONCORDR(dist, d_bodies[other], computeDistance, (d_bodies[id]));
CONCORDR(F, d_bodies[other], computeForce, (d_bodies[id], dist));
CONCORD(d_bodies[other], updateForceX, (d_bodies[id], F));
CONCORD(d_bodies[other], updateForceY, (d_bodies[id], F));
}
}
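// Accumulates the force on body id contributed by every active body in the simulation.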
__device__ void Body_compute_force(IndexT id) {
CONCORD(d_bodies[id], initForce, ());
// device_do
for (IndexT i = 0; i < kNumBodies; ++i) {
bool active;
CONCORDR(active, d_bodies[i], active, ());
if (active) {
Body_apply_force(i, id);
}
}
}
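// Advances the velocity and position of body id by one step and reflects the velocity at the [-1, 1] domain walls.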
__device__ void Body_update(IndexT id) {
float idposx;
float idposy;
float idvelx;
float idvely;
CONCORD(d_bodies[id], updateVelX, ());
CONCORD(d_bodies[id], updateVelY, ());
CONCORD(d_bodies[id], updatePosX, ());
CONCORD(d_bodies[id], updatePosY, ());
CONCORDR(idposx, d_bodies[id], PosX,());
CONCORDR(idposy, d_bodies[id], PosY,());
CONCORDR(idvelx, d_bodies[id], VelX,());
CONCORDR(idvely, d_bodies[id], VelY,());
if (idposx < -1 || idposx > 1) {
CONCORD(d_bodies[id], set_VelX, (-idvelx));
}
if (idposy < -1 || idposy > 1) {
CONCORD(d_bodies[id], set_VelY, (-idvely));
}
}
__device__ void Body_check_merge_into_this(IndexT id, IndexT other) {
// Only merge into larger body.
bool cond1;
float othermass;
float idmass;
CONCORDR(cond1, d_bodies[other], get_incoming_merge, ());
CONCORDR(othermass, d_bodies[other], get_mass, ());
CONCORDR(idmass, d_bodies[id], get_mass, ());
if (!cond1 && idmass > othermass) {
float dist_square;
CONCORDR(dist_square, d_bodies[id], computeDistance, (d_bodies[other]));
dist_square *= dist_square;
if (dist_square < kMergeThreshold * kMergeThreshold) {
// Try to merge this one.
// There is a race condition here: Multiple threads may try to merge
// this body.
CONCORD(d_bodies[id], set_merge_target, (other));
CONCORD(d_bodies[other], set_incoming_merge, (true));
}
}
}
__device__ void Body_initialize_merge(IndexT id) {
CONCORD(d_bodies[id], set_merge_target, (kNullptr));
CONCORD(d_bodies[id], set_incoming_merge, (false));
CONCORD(d_bodies[id], set_is_successful_merge, (false));
}
__device__ void Body_prepare_merge(IndexT id) {
// device_do
for (IndexT i = 0; i < kNumBodies; ++i) {
bool active;
CONCORDR(active, d_bodies[i], active, ());
if (active) {
Body_check_merge_into_this(i, id);
}
}
}
__device__ void Body_update_merge(IndexT id) {
IndexT m = d_bodies[id]->get_merge_target();
if (m != kNullptr) {
if (d_bodies[m]->get_merge_target() == kNullptr) {
// Perform merge.
float mmass;
float new_mass;
float idvelx;
float idvely;
float mvelx;
float mvely;
float idmass;
CONCORDR(idmass, d_bodies[id], get_mass, ());
CONCORDR(mmass, d_bodies[m], get_mass, ());
new_mass = idmass + mmass;
CONCORDR(idvelx, d_bodies[id], VelX, ());
CONCORDR(idvely, d_bodies[id], VelY, ());
CONCORDR(mvelx, d_bodies[m], VelX, ());
CONCORDR(mvely, d_bodies[m], VelY, ());
float new_vel_x = (idvelx * idmass + mvelx * mmass) / new_mass;
float new_vel_y = (idvely * idmass + mvely * mmass) / new_mass;
CONCORD(d_bodies[m], set_mass, (new_mass));
CONCORD(d_bodies[m], set_VelX, (new_vel_x));
CONCORD(d_bodies[m], set_VelY, (new_vel_y));
float idposx;
float idposy;
float mposx;
float mposy;
CONCORDR(idposx, d_bodies[id], PosX, ());
CONCORDR(idposy, d_bodies[id], PosY, ());
CONCORDR(mposx, d_bodies[m], PosX, ());
CONCORDR(mposy, d_bodies[m], PosY, ());
CONCORD(d_bodies[m], set_PosX, ((idposx + mposx) / 2));
CONCORD(d_bodies[m], set_PosY, ((idposy + mposy) / 2));
CONCORD(d_bodies[id], set_is_successful_merge, (true));
}
}
}
__device__ void Body_delete_merged(IndexT id) {
bool cond;
CONCORDR(cond, d_bodies[id], get_is_successful_merge, ());
if (cond) {
CONCORD(d_bodies[id], set_active, (false));
}
}
__device__ void Body_add_to_draw_array(IndexT id) {
int idx = atomicAdd(&r_draw_counter, 1);
r_Body_pos_x[idx] = d_bodies[id]->pos_x;
r_Body_pos_y[idx] = d_bodies[id]->pos_y;
r_Body_vel_x[idx] = d_bodies[id]->vel_x;
r_Body_vel_y[idx] = d_bodies[id]->vel_y;
r_Body_mass[idx] = d_bodies[id]->mass;
}
__device__ void new_Body(IndexT id, float pos_x, float pos_y, float vel_x,
float vel_y, float mass) {
d_bodies[id]->pos_x = pos_x;
d_bodies[id]->pos_y = pos_y;
d_bodies[id]->vel_x = vel_x;
d_bodies[id]->vel_y = vel_y;
d_bodies[id]->mass = mass;
d_bodies[id]->is_active = true;
}
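// Allocates the Body objects and initializes each with a random position, velocity, and mass.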
__global__ void kernel_initialize_bodies() {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
hiprandState_t rand_state;
hiprand_init(kSeed, tid, 0, &rand_state);
for (int id = tid; id < kNumBodies; id += blockDim.x * gridDim.x) {
d_bodies[id] = new Body();
assert(d_bodies[id] != NULL);
new_Body(id,
/*pos_x=*/2 * hiprand_uniform(&rand_state) - 1,
/*pos_y=*/2 * hiprand_uniform(&rand_state) - 1,
/*vel_x=*/(hiprand_uniform(&rand_state) - 0.5) / 1000,
/*vel_y=*/(hiprand_uniform(&rand_state) - 0.5) / 1000,
/*mass=*/(hiprand_uniform(&rand_state) / 2 + 0.5) * kMaxMass);
}
}
__global__ void kernel_reset_draw_counters() { r_draw_counter = 0; }
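// Grid-stride wrapper kernel that applies func to every body that is still active.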
template <void (*func)(IndexT)>
__global__ void parallel_do() {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (int id = tid; id < kNumBodies; id += blockDim.x * gridDim.x) {
bool active;
CONCORDR(active, d_bodies[id], active, ());
if (active) {
func(id);
}
}
}
template <void (*func)(IndexT)>
__global__ void parallel_init() {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (int id = tid; id < kNumBodies; id += blockDim.x * gridDim.x) {
if (d_bodies[id]->active()) {
func(id);
}
}
}
void transfer_data() {
// Extract data from SoaAlloc data structure.
hipLaunchKernelGGL(( kernel_reset_draw_counters), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( parallel_init<&Body_add_to_draw_array>), dim3(kBlocks), dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
// Copy data to host.
hipMemcpyFromSymbol(host_Body_pos_x, r_Body_pos_x,
sizeof(float) * kNumBodies, 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(host_Body_pos_y, r_Body_pos_y,
sizeof(float) * kNumBodies, 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(host_Body_vel_x, r_Body_vel_x,
sizeof(float) * kNumBodies, 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(host_Body_vel_y, r_Body_vel_y,
sizeof(float) * kNumBodies, 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(host_Body_mass, r_Body_mass,
sizeof(float) * kNumBodies, 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&host_draw_counter, r_draw_counter, sizeof(int), 0,
hipMemcpyDeviceToHost);
}
int checksum() {
transfer_data();
int result = 0;
for (int i = 0; i < kNumBodies; ++i) {
int Body_checksum =
static_cast<int>(
(host_Body_pos_x[i] * 1000 + host_Body_pos_y[i] * 2000 +
host_Body_vel_x[i] * 3000 + host_Body_vel_y[i] * 4000)) %
123456;
result += Body_checksum;
}
return result;
}
int main(int /*argc*/, char ** /*argv*/) {
#ifdef OPTION_RENDER
init_renderer();
#endif // OPTION_RENDER
// Allocate memory.
hipMallocManaged(&d_bodies, sizeof(BodyType *) * kNumBodies);
// Allocate and create Body objects.
hipLaunchKernelGGL(( kernel_initialize_bodies), dim3(128), dim3(128), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
#ifdef OPTION_RENDER
// Compute max_mass.
float max_mass = 0.0f;
transfer_data();
for (int i = 0; i < host_draw_counter; ++i) {
max_mass += host_Body_mass[i];
}
#endif // OPTION_RENDER
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kIterations; ++i) {
printf("%i\n", i);
hipLaunchKernelGGL(( parallel_do<&Body_compute_force>), dim3(kBlocks), dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( parallel_do<&Body_update>), dim3(kBlocks), dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( parallel_do<&Body_initialize_merge>), dim3(kBlocks), dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( parallel_do<&Body_prepare_merge>), dim3(kBlocks), dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( parallel_do<&Body_update_merge>), dim3(kBlocks), dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( parallel_do<&Body_delete_merged>), dim3(kBlocks), dim3(kThreads), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto micros =
std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
printf("#bodies: %i\n", host_draw_counter);
#endif // NDEBUG
printf("%lu\n", micros);
// Free memory
#ifdef OPTION_RENDER
close_renderer();
#endif // OPTION_RENDER
return 0;
}
| e5071f8725345ed178fc69a335096d16939aac6c.cu |
#include "collision.h"
__managed__ BodyType **d_bodies;
// Helper variables for rendering and checksum computation.
__device__ int r_draw_counter = 0;
__device__ float r_Body_pos_x[kNumBodies];
__device__ float r_Body_pos_y[kNumBodies];
__device__ float r_Body_vel_x[kNumBodies];
__device__ float r_Body_vel_y[kNumBodies];
__device__ float r_Body_mass[kNumBodies];
int host_draw_counter;
float host_Body_pos_x[kNumBodies];
float host_Body_pos_y[kNumBodies];
float host_Body_vel_x[kNumBodies];
float host_Body_vel_y[kNumBodies];
float host_Body_mass[kNumBodies];
float host_Body_is_active[kNumBodies];
__device__ void Body_apply_force(IndexT id, IndexT other) {
// Update `other`.
if (other != id) {
float dist;
float F;
CONCORDR(dist, d_bodies[other], computeDistance, (d_bodies[id]));
CONCORDR(F, d_bodies[other], computeForce, (d_bodies[id], dist));
CONCORD(d_bodies[other], updateForceX, (d_bodies[id], F));
CONCORD(d_bodies[other], updateForceY, (d_bodies[id], F));
}
}
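// Accumulates the force on body id contributed by every active body in the simulation.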
__device__ void Body_compute_force(IndexT id) {
CONCORD(d_bodies[id], initForce, ());
// device_do
for (IndexT i = 0; i < kNumBodies; ++i) {
bool active;
CONCORDR(active, d_bodies[i], active, ());
if (active) {
Body_apply_force(i, id);
}
}
}
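// Advances the velocity and position of body id by one step and reflects the velocity at the [-1, 1] domain walls.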
__device__ void Body_update(IndexT id) {
float idposx;
float idposy;
float idvelx;
float idvely;
CONCORD(d_bodies[id], updateVelX, ());
CONCORD(d_bodies[id], updateVelY, ());
CONCORD(d_bodies[id], updatePosX, ());
CONCORD(d_bodies[id], updatePosY, ());
CONCORDR(idposx, d_bodies[id], PosX,());
CONCORDR(idposy, d_bodies[id], PosY,());
CONCORDR(idvelx, d_bodies[id], VelX,());
CONCORDR(idvely, d_bodies[id], VelY,());
if (idposx < -1 || idposx > 1) {
CONCORD(d_bodies[id], set_VelX, (-idvelx));
}
if (idposy < -1 || idposy > 1) {
CONCORD(d_bodies[id], set_VelY, (-idvely));
}
}
__device__ void Body_check_merge_into_this(IndexT id, IndexT other) {
// Only merge into larger body.
bool cond1;
float othermass;
float idmass;
CONCORDR(cond1, d_bodies[other], get_incoming_merge, ());
CONCORDR(othermass, d_bodies[other], get_mass, ());
CONCORDR(idmass, d_bodies[id], get_mass, ());
if (!cond1 && idmass > othermass) {
float dist_square;
CONCORDR(dist_square, d_bodies[id], computeDistance, (d_bodies[other]));
dist_square *= dist_square;
if (dist_square < kMergeThreshold * kMergeThreshold) {
// Try to merge this one.
// There is a race condition here: Multiple threads may try to merge
// this body.
CONCORD(d_bodies[id], set_merge_target, (other));
CONCORD(d_bodies[other], set_incoming_merge, (true));
}
}
}
__device__ void Body_initialize_merge(IndexT id) {
CONCORD(d_bodies[id], set_merge_target, (kNullptr));
CONCORD(d_bodies[id], set_incoming_merge, (false));
CONCORD(d_bodies[id], set_is_successful_merge, (false));
}
__device__ void Body_prepare_merge(IndexT id) {
// device_do
for (IndexT i = 0; i < kNumBodies; ++i) {
bool active;
CONCORDR(active, d_bodies[i], active, ());
if (active) {
Body_check_merge_into_this(i, id);
}
}
}
__device__ void Body_update_merge(IndexT id) {
IndexT m = d_bodies[id]->get_merge_target();
if (m != kNullptr) {
if (d_bodies[m]->get_merge_target() == kNullptr) {
// Perform merge.
float mmass;
float new_mass;
float idvelx;
float idvely;
float mvelx;
float mvely;
float idmass;
CONCORDR(idmass, d_bodies[id], get_mass, ());
CONCORDR(mmass, d_bodies[m], get_mass, ());
new_mass = idmass + mmass;
CONCORDR(idvelx, d_bodies[id], VelX, ());
CONCORDR(idvely, d_bodies[id], VelY, ());
CONCORDR(mvelx, d_bodies[m], VelX, ());
CONCORDR(mvely, d_bodies[m], VelY, ());
float new_vel_x = (idvelx * idmass + mvelx * mmass) / new_mass;
float new_vel_y = (idvely * idmass + mvely * mmass) / new_mass;
CONCORD(d_bodies[m], set_mass, (new_mass));
CONCORD(d_bodies[m], set_VelX, (new_vel_x));
CONCORD(d_bodies[m], set_VelY, (new_vel_y));
float idposx;
float idposy;
float mposx;
float mposy;
CONCORDR(idposx, d_bodies[id], PosX, ());
CONCORDR(idposy, d_bodies[id], PosY, ());
CONCORDR(mposx, d_bodies[m], PosX, ());
CONCORDR(mposy, d_bodies[m], PosY, ());
CONCORD(d_bodies[m], set_PosX, ((idposx + mposx) / 2));
CONCORD(d_bodies[m], set_PosY, ((idposy + mposy) / 2));
CONCORD(d_bodies[id], set_is_successful_merge, (true));
}
}
}
__device__ void Body_delete_merged(IndexT id) {
bool cond;
CONCORDR(cond, d_bodies[id], get_is_successful_merge, ());
if (cond) {
CONCORD(d_bodies[id], set_active, (false));
}
}
__device__ void Body_add_to_draw_array(IndexT id) {
int idx = atomicAdd(&r_draw_counter, 1);
r_Body_pos_x[idx] = d_bodies[id]->pos_x;
r_Body_pos_y[idx] = d_bodies[id]->pos_y;
r_Body_vel_x[idx] = d_bodies[id]->vel_x;
r_Body_vel_y[idx] = d_bodies[id]->vel_y;
r_Body_mass[idx] = d_bodies[id]->mass;
}
__device__ void new_Body(IndexT id, float pos_x, float pos_y, float vel_x,
float vel_y, float mass) {
d_bodies[id]->pos_x = pos_x;
d_bodies[id]->pos_y = pos_y;
d_bodies[id]->vel_x = vel_x;
d_bodies[id]->vel_y = vel_y;
d_bodies[id]->mass = mass;
d_bodies[id]->is_active = true;
}
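// Allocates the Body objects and initializes each with a random position, velocity, and mass.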
__global__ void kernel_initialize_bodies() {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
curandState rand_state;
curand_init(kSeed, tid, 0, &rand_state);
for (int id = tid; id < kNumBodies; id += blockDim.x * gridDim.x) {
d_bodies[id] = new Body();
assert(d_bodies[id] != NULL);
new_Body(id,
/*pos_x=*/2 * curand_uniform(&rand_state) - 1,
/*pos_y=*/2 * curand_uniform(&rand_state) - 1,
/*vel_x=*/(curand_uniform(&rand_state) - 0.5) / 1000,
/*vel_y=*/(curand_uniform(&rand_state) - 0.5) / 1000,
/*mass=*/(curand_uniform(&rand_state) / 2 + 0.5) * kMaxMass);
}
}
__global__ void kernel_reset_draw_counters() { r_draw_counter = 0; }
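// Grid-stride wrapper kernel that applies func to every body that is still active.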
template <void (*func)(IndexT)>
__global__ void parallel_do() {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (int id = tid; id < kNumBodies; id += blockDim.x * gridDim.x) {
bool active;
CONCORDR(active, d_bodies[id], active, ());
if (active) {
func(id);
}
}
}
template <void (*func)(IndexT)>
__global__ void parallel_init() {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
for (int id = tid; id < kNumBodies; id += blockDim.x * gridDim.x) {
if (d_bodies[id]->active()) {
func(id);
}
}
}
void transfer_data() {
// Extract data from SoaAlloc data structure.
kernel_reset_draw_counters<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
parallel_init<&Body_add_to_draw_array><<<kBlocks, kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
// Copy data to host.
cudaMemcpyFromSymbol(host_Body_pos_x, r_Body_pos_x,
sizeof(float) * kNumBodies, 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(host_Body_pos_y, r_Body_pos_y,
sizeof(float) * kNumBodies, 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(host_Body_vel_x, r_Body_vel_x,
sizeof(float) * kNumBodies, 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(host_Body_vel_y, r_Body_vel_y,
sizeof(float) * kNumBodies, 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(host_Body_mass, r_Body_mass,
sizeof(float) * kNumBodies, 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&host_draw_counter, r_draw_counter, sizeof(int), 0,
cudaMemcpyDeviceToHost);
}
int checksum() {
transfer_data();
int result = 0;
for (int i = 0; i < kNumBodies; ++i) {
int Body_checksum =
static_cast<int>(
(host_Body_pos_x[i] * 1000 + host_Body_pos_y[i] * 2000 +
host_Body_vel_x[i] * 3000 + host_Body_vel_y[i] * 4000)) %
123456;
result += Body_checksum;
}
return result;
}
int main(int /*argc*/, char ** /*argv*/) {
#ifdef OPTION_RENDER
init_renderer();
#endif // OPTION_RENDER
// Allocate memory.
cudaMallocManaged(&d_bodies, sizeof(BodyType *) * kNumBodies);
// Allocate and create Body objects.
kernel_initialize_bodies<<<128, 128>>>();
gpuErrchk(cudaDeviceSynchronize());
#ifdef OPTION_RENDER
// Compute max_mass.
float max_mass = 0.0f;
transfer_data();
for (int i = 0; i < host_draw_counter; ++i) {
max_mass += host_Body_mass[i];
}
#endif // OPTION_RENDER
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kIterations; ++i) {
printf("%i\n", i);
parallel_do<&Body_compute_force><<<kBlocks, kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
parallel_do<&Body_update><<<kBlocks, kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
parallel_do<&Body_initialize_merge><<<kBlocks, kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
parallel_do<&Body_prepare_merge><<<kBlocks, kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
parallel_do<&Body_update_merge><<<kBlocks, kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
parallel_do<&Body_delete_merged><<<kBlocks, kThreads>>>();
gpuErrchk(cudaDeviceSynchronize());
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto micros =
std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
printf("#bodies: %i\n", host_draw_counter);
#endif // NDEBUG
printf("%lu\n", micros);
// Free memory
#ifdef OPTION_RENDER
close_renderer();
#endif // OPTION_RENDER
return 0;
}
|
cdbbe81200f56dc488b921416ccdfb8dbed1c924.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
nvcc StarContinueRun.cu -o StarContinueRun.exe -lglut -lGL -lGLU -lm
nvcc StarContinueRun.cu -o StarContinueRun.exe -lglut -lGL -lGLU -lm --use_fast_math
*/
#include "../CommonCompileFiles/binaryStarCommonIncludes.h"
#include "../CommonCompileFiles/binaryStarCommonDefines.h"
#include "../CommonCompileFiles/binaryStarCommonGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonFunctions.h"
#include "../CommonCompileFiles/binaryStarCommonRunGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonRunFunctions.h"
//Time to add on to the run. Read in from the command line.
float ContinueRunTime;
void openAndReadFiles()
{
ifstream data;
string name;
//Opening the positions and velocity file to dump stuff to make movies out of. Need to move to the end of the file.
PosAndVelFile = fopen("PosAndVel", "rb+");
if(PosAndVelFile == NULL)
{
printf("\n\n The PosAndVel file does not exist\n\n");
exit(0);
}
fseek(PosAndVelFile,0,SEEK_END);
//Reading in the run parameters
data.open("RunParameters");
if(data.is_open() == 1)
{
getline(data,name,'=');
data >> SystemLengthConverterToKilometers;
getline(data,name,'=');
data >> SystemMassConverterToKilograms;
getline(data,name,'=');
data >> SystemTimeConverterToSeconds;
getline(data,name,'=');
data >> NumberElementsStar1;
getline(data,name,'=');
data >> NumberElementsStar2;
getline(data,name,'=');
data >> CoreCorePushBackReduction;
getline(data,name,'=');
data >> CorePlasmaPushBackReduction;
getline(data,name,'=');
data >> PlasmaPlasmaPushBackReduction;
getline(data,name,'=');
data >> Dt;
getline(data,name,'=');
data >> ZoomFactor;
getline(data,name,'=');
data >> PrintRate;
getline(data,name,'=');
data >> Core1Color.x;
getline(data,name,'=');
data >> Core1Color.y;
getline(data,name,'=');
data >> Core1Color.z;
getline(data,name,'=');
data >> Core2Color.x;
getline(data,name,'=');
data >> Core2Color.y;
getline(data,name,'=');
data >> Core2Color.z;
getline(data,name,'=');
data >> Envelope1Color.x;
getline(data,name,'=');
data >> Envelope1Color.y;
getline(data,name,'=');
data >> Envelope1Color.z;
getline(data,name,'=');
data >> Envelope2Color.x;
getline(data,name,'=');
data >> Envelope2Color.y;
getline(data,name,'=');
data >> Envelope2Color.z;
getline(data,name,'=');
data >> RadiusCore1;
getline(data,name,'=');
data >> RadiusCore2;
}
else
{
printf("\nTSU Error could not open RunParameters file\n");
exit(0);
}
data.close();
NumberElements = NumberElementsStar1 + NumberElementsStar2;
ContinueRunTime *=((24.0*60.0*60.0)/SystemTimeConverterToSeconds);
//Reading in the run parameters
data.open("BranchRunParameters");
if(data.is_open() == 1)
{
getline(data,name,'=');
data >> RecordRate;
getline(data,name,'=');
data >> DrawRate;
}
else
{
printf("\nTSU Error could not open BranchRunParameters file\n");
exit(0);
}
data.close();
}
void readInTheInitialsStars()
{
FILE *startFile = fopen("FinalPosVelForce","rb");
if(startFile == NULL)
{
printf("\n\n The FinalPosVelForce file does not exist\n\n");
exit(0);
}
fread(&StartTime, sizeof(float), 1, startFile);
fread(PosCPU, sizeof(float4), NumberElements, startFile);
fread(VelCPU, sizeof(float4), NumberElements, startFile);
fread(ForceCPU, sizeof(float4), NumberElements, startFile);
fclose(startFile);
}
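// Tiled N-body force kernel: each thread owns one element and accumulates its force by sweeping shared-memory tiles of every element held across all GPUs, using the core-core, core-plasma, or plasma-plasma interaction as appropriate.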
__global__ void getForces(float4 *pos, float4 *vel, float4 *force, int numberElementsStar1, int numberOfElements, float coreCorePushBackReduction, float corePlasmaPushBackReduction, float plasmaPlasmaPushBackReduction, int gPUNumber, int gPUsUsed)
{
int id, ids, i, j, k;
float4 posMe, velMe, forceMe;
float4 partialForce;
double forceSumX, forceSumY, forceSumZ;
__shared__ float4 shPos[BLOCKSIZE];
__shared__ float4 shVel[BLOCKSIZE];
__shared__ float4 shForce[BLOCKSIZE];
id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
if(numberOfElements <= id)
{
printf("\n TSU error: id out of bounds in getForces. \n");
}
forceSumX = 0.0;
forceSumY = 0.0;
forceSumZ = 0.0;
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
velMe.x = vel[id].x;
velMe.y = vel[id].y;
velMe.z = vel[id].z;
velMe.w = vel[id].w;
forceMe.x = force[id].x;
forceMe.y = force[id].y;
forceMe.z = force[id].z;
forceMe.w = force[id].w;
for(k =0; k < gPUsUsed; k++)
{
for(j = 0; j < gridDim.x; j++)
{
shPos[threadIdx.x] = pos [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shVel[threadIdx.x] = vel [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shForce[threadIdx.x] = force[threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
__syncthreads();
#pragma unroll 32
for(i = 0; i < blockDim.x; i++)
{
ids = i + blockDim.x*j + blockDim.x*gridDim.x*k;
if(id != ids)
{
if(id == 0 && ids == numberElementsStar1)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], coreCorePushBackReduction);
}
else if(id == numberElementsStar1 && ids == 0)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], coreCorePushBackReduction);
}
else if(id == 0 || id == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(0, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePlasmaPushBackReduction);
}
else if(ids == 0 || ids == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(1, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePlasmaPushBackReduction);
}
else
{
partialForce = calculatePlasmaPlasmaForce(posMe, shPos[i], velMe, shVel[i], plasmaPlasmaPushBackReduction);
}
forceSumX += partialForce.x;
forceSumY += partialForce.y;
forceSumZ += partialForce.z;
}
}
__syncthreads();
}
}
force[id].x = (float)forceSumX;
force[id].y = (float)forceSumY;
force[id].z = (float)forceSumZ;
}
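// Semi-implicit Euler step: velocities are advanced with the current forces, then positions with the new velocities.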
__global__ void moveBodies(float4 *pos, float4 *vel, float4 *force, float dt, int gPUNumber)
{
int id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
vel[id].x += (force[id].x/pos[id].w)*dt;
vel[id].y += (force[id].y/pos[id].w)*dt;
vel[id].z += (force[id].z/pos[id].w)*dt;
pos[id].x += vel[id].x*dt;
pos[id].y += vel[id].y*dt;
pos[id].z += vel[id].z*dt;
}
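// Main time-stepping loop: computes forces and moves elements on every GPU, exchanges the updated positions and velocities between GPUs, and periodically draws, records, and backs up the run.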
float starNbody(float time, float runTime, float dt, int gPUsUsed)
{
int tDraw = 0;
int tRecord = 0;
int tBackup = 0;
float printTime = 0.0;
int backupRate = 10000;
while(time < runTime)
{
int offSet = NumberElements/gPUsUsed;
//Getting forces
for(int i = 0; i < gPUsUsed; i++)
{
hipSetDevice(i);
errorCheck("hipSetDevice");
hipLaunchKernelGGL(( getForces), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosGPU[i], VelGPU[i], ForceGPU[i], NumberElementsStar1, NumberElements, CoreCorePushBackReduction, CorePlasmaPushBackReduction, PlasmaPlasmaPushBackReduction, i, gPUsUsed);
errorCheck("getForces");
}
//Moving elements
for(int i = 0; i < gPUsUsed; i++)
{
hipSetDevice(i);
errorCheck("hipSetDevice");
hipLaunchKernelGGL(( moveBodies), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosGPU[i], VelGPU[i], ForceGPU[i], dt, i);
errorCheck("moveBodies");
}
hipDeviceSynchronize();
errorCheck("hipDeviceSynchronize");
//Sharing memory
for(int i = 0; i < gPUsUsed; i++)
{
hipSetDevice(i);
errorCheck("hipSetDevice");
for(int j = 0; j < gPUsUsed; j++)
{
if(i != j)
{
hipMemcpyAsync(&PosGPU[j][i*offSet], &PosGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
errorCheck("hipMemcpy Pos");
hipMemcpyAsync(&VelGPU[j][i*offSet], &VelGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), hipMemcpyDeviceToDevice);
errorCheck("hipMemcpy Vel");
}
}
}
hipDeviceSynchronize();
errorCheck("hipDeviceSynchronize");
time += dt;
tDraw++;
if(tDraw == DrawRate)
{
//Because it is shared above it will only need to be copied from one GPU.
hipSetDevice(0);
errorCheck("hipSetDevice");
hipMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
errorCheck("hipMemcpy Pos draw");
drawPicture();
tDraw = 0;
//printf("\n Time in days = %f", time*SystemTimeConverterToSeconds/(60.0*60.0*24.0));
}
tRecord++;
if(tRecord == RecordRate)
{
//Because it is shared above it will only need to be copied from one GPU.
hipSetDevice(0);
errorCheck("hipSetDevice");
hipMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
errorCheck("hipMemcpy Pos record");
hipMemcpy(VelCPU, VelGPU[0], (NumberElements)*sizeof(float4), hipMemcpyDeviceToHost);
errorCheck("hipMemcpy Vel record");
recordPosAndVel(time);
tRecord = 0;
}
tBackup++;
if(tBackup == backupRate)
{
//Because it is shared above it will only need to be copied from one GPU.
//Saving the run's positions, velocities and forces in case the system crashes in the middle of a run
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
tBackup = 0;
}
printTime += dt;
if(PrintRate <= printTime)
{
printf("\n Time = %f days", time/(24.0*3600.0/SystemTimeConverterToSeconds));
printTime = 0.0;
}
}
printf("\n Total run time = %f days", (time-Dt)/(24.0*3600.0/SystemTimeConverterToSeconds));
return(time - dt);
}
void control()
{
struct sigaction sa;
float time = StartTime;
clock_t startTimer, endTimer;
int gPUsUsed;
//Starting the timer.
startTimer = clock();
// Handling input from the screen.
sa.sa_handler = signalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler
if (sigaction(SIGINT, &sa, NULL) == -1)
{
printf("\nTSU Error: sigaction error\n");
}
// Reading in the build parameters.
printf("\n Reading and setting the run parameters.\n");
openAndReadFiles();
// Allocating memory for CPU and GPU.
printf("\n Allocating memory on the GPU and CPU and opening positions and velocities file.\n");
allocateCPUMemory();
// Reading in the raw stars generated by the build program.
printf("\n Reading in the stars that were generated in the build program.\n");
readInTheInitialsStars();
// Draw the initial configuration.
printf("\n Drawing initial picture.\n");
drawPicture();
// Setting up the GPU.
printf("\n Setting up the GPU.\n");
gPUsUsed = deviceSetup();
// Running the simulation.
printf("\n Running the simulation.\n");
copyStarsUpToGPU(gPUsUsed);
time = starNbody(StartTime, StartTime + ContinueRunTime, Dt, gPUsUsed);
// Saving the run's final positions and velocities.
printf("\n Saving the run's final positions and velocities.\n");
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
// Saving any wanted stats about the run that you may want. I don't have anything to record as of yet.
printf("\n Saving any wanted stats about the run that you may want.\n");
//recordStarStats();
// Freeing memory.
printf("\n Cleaning up the run.\n");
cleanUp(gPUsUsed);
fclose(PosAndVelFile);
// Stopping timer and printing out run time.
endTimer = clock();
int seconds = (endTimer - startTimer)/CLOCKS_PER_SEC;
int hours = seconds/3600;
int minutes = (seconds - hours*3600)/60;
seconds = seconds - hours*3600 - minutes*60;
printf("\n Total time taken for this run: %d hours %d minutes %d seconds\n", hours, minutes, seconds);
printf("\n The run has finished successfully \n\n");
exit(0);
}
int main(int argc, char** argv)
{
if( argc < 2)
{
printf("\n You need to intire an amount of time to add to the run on the comand line\n");
exit(0);
}
else
{
ContinueRunTime = atof(argv[1]); //Reading time in as days. Need to put in our units after the parameter file is read in.
}
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(XWindowSize,YWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("Creating Stars");
glutReshapeFunc(reshape);
init();
glShadeModel(GL_SMOOTH);
glClearColor(0.0, 0.0, 0.0, 0.0);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutIdleFunc(control);
glutMainLoop();
return 0;
}
| cdbbe81200f56dc488b921416ccdfb8dbed1c924.cu | /*
nvcc StarContinueRun.cu -o StarContinueRun.exe -lglut -lGL -lGLU -lm
nvcc StarContinueRun.cu -o StarContinueRun.exe -lglut -lGL -lGLU -lm --use_fast_math
*/
#include "../CommonCompileFiles/binaryStarCommonIncludes.h"
#include "../CommonCompileFiles/binaryStarCommonDefines.h"
#include "../CommonCompileFiles/binaryStarCommonGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonFunctions.h"
#include "../CommonCompileFiles/binaryStarCommonRunGlobals.h"
#include "../CommonCompileFiles/binaryStarCommonRunFunctions.h"
//Time to add on to the run. Read in from the command line.
float ContinueRunTime;
void openAndReadFiles()
{
ifstream data;
string name;
//Opening the positions and velocity file to dump stuff to make movies out of. Need to move to the end of the file.
PosAndVelFile = fopen("PosAndVel", "rb+");
if(PosAndVelFile == NULL)
{
printf("\n\n The PosAndVel file does not exist\n\n");
exit(0);
}
fseek(PosAndVelFile,0,SEEK_END);
//Reading in the run parameters
data.open("RunParameters");
if(data.is_open() == 1)
{
getline(data,name,'=');
data >> SystemLengthConverterToKilometers;
getline(data,name,'=');
data >> SystemMassConverterToKilograms;
getline(data,name,'=');
data >> SystemTimeConverterToSeconds;
getline(data,name,'=');
data >> NumberElementsStar1;
getline(data,name,'=');
data >> NumberElementsStar2;
getline(data,name,'=');
data >> CoreCorePushBackReduction;
getline(data,name,'=');
data >> CorePlasmaPushBackReduction;
getline(data,name,'=');
data >> PlasmaPlasmaPushBackReduction;
getline(data,name,'=');
data >> Dt;
getline(data,name,'=');
data >> ZoomFactor;
getline(data,name,'=');
data >> PrintRate;
getline(data,name,'=');
data >> Core1Color.x;
getline(data,name,'=');
data >> Core1Color.y;
getline(data,name,'=');
data >> Core1Color.z;
getline(data,name,'=');
data >> Core2Color.x;
getline(data,name,'=');
data >> Core2Color.y;
getline(data,name,'=');
data >> Core2Color.z;
getline(data,name,'=');
data >> Envelope1Color.x;
getline(data,name,'=');
data >> Envelope1Color.y;
getline(data,name,'=');
data >> Envelope1Color.z;
getline(data,name,'=');
data >> Envelope2Color.x;
getline(data,name,'=');
data >> Envelope2Color.y;
getline(data,name,'=');
data >> Envelope2Color.z;
getline(data,name,'=');
data >> RadiusCore1;
getline(data,name,'=');
data >> RadiusCore2;
}
else
{
printf("\nTSU Error could not open RunParameters file\n");
exit(0);
}
data.close();
NumberElements = NumberElementsStar1 + NumberElementsStar2;
ContinueRunTime *=((24.0*60.0*60.0)/SystemTimeConverterToSeconds);
//Reading in the branch run parameters
data.open("BranchRunParameters");
if(data.is_open() == 1)
{
getline(data,name,'=');
data >> RecordRate;
getline(data,name,'=');
data >> DrawRate;
}
else
{
printf("\nTSU Error could not open BranchRunParameters file\n");
exit(0);
}
data.close();
}
void readInTheInitialsStars()
{
FILE *startFile = fopen("FinalPosVelForce","rb");
if(startFile == NULL)
{
printf("\n\n The FinalPosVelForce file does not exist\n\n");
exit(0);
}
fread(&StartTime, sizeof(float), 1, startFile);
fread(PosCPU, sizeof(float4), NumberElements, startFile);
fread(VelCPU, sizeof(float4), NumberElements, startFile);
fread(ForceCPU, sizeof(float4), NumberElements, startFile);
fclose(startFile);
}
__global__ void getForces(float4 *pos, float4 *vel, float4 *force, int numberElementsStar1, int numberOfElements, float coreCorePushBackReduction, float corePlasmaPushBackReduction, float plasmaPlasmaPushBackReduction, int gPUNumber, int gPUsUsed)
{
int id, ids, i, j, k;
float4 posMe, velMe, forceMe;
float4 partialForce;
double forceSumX, forceSumY, forceSumZ;
__shared__ float4 shPos[BLOCKSIZE];
__shared__ float4 shVel[BLOCKSIZE];
__shared__ float4 shForce[BLOCKSIZE];
id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
if(numberOfElements <= id)
{
printf("\n TSU error: id out of bounds in getForces. \n");
}
forceSumX = 0.0;
forceSumY = 0.0;
forceSumZ = 0.0;
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
velMe.x = vel[id].x;
velMe.y = vel[id].y;
velMe.z = vel[id].z;
velMe.w = vel[id].w;
forceMe.x = force[id].x;
forceMe.y = force[id].y;
forceMe.z = force[id].z;
forceMe.w = force[id].w;
for(k =0; k < gPUsUsed; k++)
{
for(j = 0; j < gridDim.x; j++)
{
shPos[threadIdx.x] = pos [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shVel[threadIdx.x] = vel [threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
shForce[threadIdx.x] = force[threadIdx.x + blockDim.x*j + blockDim.x*gridDim.x*k];
__syncthreads();
#pragma unroll 32
for(i = 0; i < blockDim.x; i++)
{
ids = i + blockDim.x*j + blockDim.x*gridDim.x*k;
if(id != ids)
{
if(id == 0 && ids == numberElementsStar1)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], coreCorePushBackReduction);
}
else if(id == numberElementsStar1 && ids == 0)
{
partialForce = calculateCoreCoreForce(posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], coreCorePushBackReduction);
}
else if(id == 0 || id == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(0, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePlasmaPushBackReduction);
}
else if(ids == 0 || ids == numberElementsStar1)
{
partialForce = calculateCorePlasmaForce(1, posMe, shPos[i], velMe, shVel[i], forceMe, shForce[i], corePlasmaPushBackReduction);
}
else
{
partialForce = calculatePlasmaPlasmaForce(posMe, shPos[i], velMe, shVel[i], plasmaPlasmaPushBackReduction);
}
forceSumX += partialForce.x;
forceSumY += partialForce.y;
forceSumZ += partialForce.z;
}
}
__syncthreads();
}
}
force[id].x = (float)forceSumX;
force[id].y = (float)forceSumY;
force[id].z = (float)forceSumZ;
}
__global__ void moveBodies(float4 *pos, float4 *vel, float4 *force, float dt, int gPUNumber)
{
int id = threadIdx.x + blockDim.x*blockIdx.x + blockDim.x*gridDim.x*gPUNumber;
vel[id].x += (force[id].x/pos[id].w)*dt;
vel[id].y += (force[id].y/pos[id].w)*dt;
vel[id].z += (force[id].z/pos[id].w)*dt;
pos[id].x += vel[id].x*dt;
pos[id].y += vel[id].y*dt;
pos[id].z += vel[id].z*dt;
}
float starNbody(float time, float runTime, float dt, int gPUsUsed)
{
int tDraw = 0;
int tRecord = 0;
int tBackup = 0;
float printTime = 0.0;
int backupRate = 10000;
while(time < runTime)
{
int offSet = NumberElements/gPUsUsed;
//Getting forces
for(int i = 0; i < gPUsUsed; i++)
{
cudaSetDevice(i);
errorCheck("cudaSetDevice");
getForces<<<GridConfig, BlockConfig>>>(PosGPU[i], VelGPU[i], ForceGPU[i], NumberElementsStar1, NumberElements, CoreCorePushBackReduction, CorePlasmaPushBackReduction, PlasmaPlasmaPushBackReduction, i, gPUsUsed);
errorCheck("getForces");
}
//Moving elements
for(int i = 0; i < gPUsUsed; i++)
{
cudaSetDevice(i);
errorCheck("cudaSetDevice");
moveBodies<<<GridConfig, BlockConfig>>>(PosGPU[i], VelGPU[i], ForceGPU[i], dt, i);
errorCheck("moveBodies");
}
cudaDeviceSynchronize();
errorCheck("cudaDeviceSynchronize");
//Sharing memory
for(int i = 0; i < gPUsUsed; i++)
{
cudaSetDevice(i);
errorCheck("cudaSetDevice");
for(int j = 0; j < gPUsUsed; j++)
{
if(i != j)
{
cudaMemcpyAsync(&PosGPU[j][i*offSet], &PosGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice);
errorCheck("cudaMemcpy Pos");
cudaMemcpyAsync(&VelGPU[j][i*offSet], &VelGPU[i][i*offSet], (NumberElements/gPUsUsed)*sizeof(float4), cudaMemcpyDeviceToDevice);
errorCheck("cudaMemcpy Vel");
}
}
}
cudaDeviceSynchronize();
errorCheck("cudaDeviceSynchronize");
time += dt;
tDraw++;
if(tDraw == DrawRate)
{
//Because it is shared above it will only need to be copied from one GPU.
cudaSetDevice(0);
errorCheck("cudaSetDevice");
cudaMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost);
errorCheck("cudaMemcpy Pos draw");
drawPicture();
tDraw = 0;
//printf("\n Time in days = %f", time*SystemTimeConverterToSeconds/(60.0*60.0*24.0));
}
tRecord++;
if(tRecord == RecordRate)
{
//Because it is shared above it will only need to be copied from one GPU.
cudaSetDevice(0);
errorCheck("cudaSetDevice");
cudaMemcpy(PosCPU, PosGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost);
errorCheck("cudaMemcpy Pos record");
cudaMemcpy(VelCPU, VelGPU[0], (NumberElements)*sizeof(float4), cudaMemcpyDeviceToHost);
errorCheck("cudaMemcpy Vel record");
recordPosAndVel(time);
tRecord = 0;
}
tBackup++;
if(tBackup == backupRate)
{
//Because it is shared above it will only need to be copied from one GPU.
//Saving the run's positions, velocities and forces in case the system crashes in the middle of a run
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
tBackup = 0;
}
printTime += dt;
if(PrintRate <= printTime)
{
printf("\n Time = %f days", time/(24.0*3600.0/SystemTimeConverterToSeconds));
printTime = 0.0;
}
}
printf("\n Total run time = %f days", (time-Dt)/(24.0*3600.0/SystemTimeConverterToSeconds));
return(time - dt);
}
void control()
{
struct sigaction sa;
float time = StartTime;
clock_t startTimer, endTimer;
int gPUsUsed;
//Starting the timer.
startTimer = clock();
// Handling input from the screen.
sa.sa_handler = signalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler
if (sigaction(SIGINT, &sa, NULL) == -1)
{
printf("\nTSU Error: sigaction error\n");
}
// Reading in the build parameters.
printf("\n Reading and setting the run parameters.\n");
openAndReadFiles();
// Allocating memory for CPU and GPU.
printf("\n Allocating memory on the GPU and CPU and opening positions and velocities file.\n");
allocateCPUMemory();
// Reading in the raw stars generated by the build program.
printf("\n Reading in the stars that were generated in the build program.\n");
readInTheInitialsStars();
// Draw the initial configuration.
printf("\n Drawing initial picture.\n");
drawPicture();
// Setting up the GPU.
printf("\n Setting up the GPU.\n");
gPUsUsed = deviceSetup();
// Running the simulation.
printf("\n Running the simulation.\n");
copyStarsUpToGPU(gPUsUsed);
time = starNbody(StartTime, StartTime + ContinueRunTime, Dt, gPUsUsed);
// Saving the run's final positions and velocities.
printf("\n Saving the run's final positions and velocities.\n");
copyStarsDownFromGPU();
recordFinalPosVelForceStars(time);
// Saving any wanted stats about the run that you may want. I don't have anything to record as of yet.
printf("\n Saving any wanted stats about the run that you may want.\n");
//recordStarStats();
// Freeing memory.
printf("\n Cleaning up the run.\n");
cleanUp(gPUsUsed);
fclose(PosAndVelFile);
// Stopping timer and printing out run time.
endTimer = clock();
int seconds = (endTimer - startTimer)/CLOCKS_PER_SEC;
int hours = seconds/3600;
int minutes = (seconds - hours*3600)/60;
seconds = seconds - hours*3600 - minutes*60;
printf("\n Total time taken for this run: %d hours %d minutes %d seconds\n", hours, minutes, seconds);
printf("\n The run has finished successfully \n\n");
exit(0);
}
int main(int argc, char** argv)
{
if( argc < 2)
{
printf("\n You need to intire an amount of time to add to the run on the comand line\n");
exit(0);
}
else
{
ContinueRunTime = atof(argv[1]); //Reading time in as days. Need to put in our units after the parameter file is read in.
}
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(XWindowSize,YWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("Creating Stars");
glutReshapeFunc(reshape);
init();
glShadeModel(GL_SMOOTH);
glClearColor(0.0, 0.0, 0.0, 0.0);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutIdleFunc(control);
glutMainLoop();
return 0;
}
|
11673b022b5e837b5ae57fede811b20f41e8bfa8.hip | // !!! This is a file automatically generated by hipify!!!
// Utilities and system includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP
#define SIZE 60000000
#define TILE_DIM 1024
#define INNER_REPS 4
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < SIZE) {
ra=A[xIndex];
rb=A[SIZE-xIndex];
rc=A[xIndex];
rd=A[SIZE-xIndex];
// rb=A[xIndex];
#pragma unroll 4
for (int i=0;i<INNER_REPS;i++) {
ra=ra*rc+rb;
rb=rb*rd+rc;
rc=rc*ra+rd;
rd=rd*rb+ra;
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
int main(int argc, char **argv) {
int outer_reps, vector_size, tile_dim;
vector_size = SIZE;
tile_dim = TILE_DIM;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
hipEvent_t start, stop;
size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size);
// allocate host memory
float *h_iA = (float *) malloc(mem_size);
float *h_oC1 = (float *) malloc(mem_size);
float *h_oC2 = (float *) malloc(mem_size);
float *h_oC3 = (float *) malloc(mem_size);
float *h_oC4 = (float *) malloc(mem_size);
// initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (float) i+3;
// h_iB[i] = (float) i+3;
}
// allocate device memory
float *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
hipMalloc((void **) &d_iA, mem_size);
// hipMalloc((void **) &d_iB, mem_size);
hipMalloc((void **) &d_oC1, mem_size);
hipMalloc((void **) &d_oC2, mem_size);
hipMalloc((void **) &d_oC3, mem_size);
hipMalloc((void **) &d_oC4, mem_size);
// copy host data to device
hipMemcpy(d_iA, h_iA, mem_size, hipMemcpyHostToDevice);
// hipMemcpy(d_iB, h_iB, mem_size, hipMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
hipEventCreate(&start);
hipEventCreate(&stop);
// take measurements for loop over kernel launches
hipEventRecord(start, 0);
for (int i=0; i < outer_reps; i++)
{
hipLaunchKernelGGL(( simpleKernel<float>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float kernelTime;
hipEventElapsedTime(&kernelTime, start, stop);
// copy results from device back to host memory
hipMemcpy(h_oC1, d_oC1, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC2, d_oC2, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC3, d_oC3, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC4, d_oC4, mem_size, hipMemcpyDeviceToHost);
printf("teste: %f\n", h_oC1[0]);
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/outer_reps,
vector_size, 1, tile_dim * 1);
free(h_iA);
// free(h_iB);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
hipFree(d_iA);
// hipFree(d_iB);
hipFree(d_oC1);
hipFree(d_oC2);
hipFree(d_oC3);
hipFree(d_oC4);
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| 11673b022b5e837b5ae57fede811b20f41e8bfa8.cu | // Utilities and system includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP
#define SIZE 60000000
#define TILE_DIM 1024
#define INNER_REPS 4
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < SIZE) {
ra=A[xIndex];
rb=A[SIZE-xIndex];
rc=A[xIndex];
rd=A[SIZE-xIndex];
// rb=A[xIndex];
#pragma unroll 4
for (int i=0;i<INNER_REPS;i++) {
ra=ra*rc+rb;
rb=rb*rd+rc;
rc=rc*ra+rd;
rd=rd*rb+ra;
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
int main(int argc, char **argv) {
int outer_reps, vector_size, tile_dim;
vector_size = SIZE;
tile_dim = TILE_DIM;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
cudaEvent_t start, stop;
size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size);
// allocate host memory
float *h_iA = (float *) malloc(mem_size);
float *h_oC1 = (float *) malloc(mem_size);
float *h_oC2 = (float *) malloc(mem_size);
float *h_oC3 = (float *) malloc(mem_size);
float *h_oC4 = (float *) malloc(mem_size);
// initialize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (float) i+3;
// h_iB[i] = (float) i+3;
}
// allocate device memory
float *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
cudaMalloc((void **) &d_iA, mem_size);
// cudaMalloc((void **) &d_iB, mem_size);
cudaMalloc((void **) &d_oC1, mem_size);
cudaMalloc((void **) &d_oC2, mem_size);
cudaMalloc((void **) &d_oC3, mem_size);
cudaMalloc((void **) &d_oC4, mem_size);
// copy host data to device
cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_iB, h_iB, mem_size, cudaMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
cudaEventCreate(&start);
cudaEventCreate(&stop);
// take measurements for loop over kernel launches
cudaEventRecord(start, 0);
for (int i=0; i < outer_reps; i++)
{
simpleKernel<float><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float kernelTime;
cudaEventElapsedTime(&kernelTime, start, stop);
// copy results from device back to host memory
cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost);
printf("teste: %f\n", h_oC1[0]);
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/outer_reps,
vector_size, 1, tile_dim * 1);
free(h_iA);
// free(h_iB);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
cudaFree(d_iA);
// cudaFree(d_iB);
cudaFree(d_oC1);
cudaFree(d_oC2);
cudaFree(d_oC3);
cudaFree(d_oC4);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
75437c17ad069b3f68e373f1ca4232c43cff5548.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* PolynomialCalulater.cu
*
* Created on: 28/05/2013
* Author: Zeyi Wen
* Copyright @DBGroup University of Melbourne
**/
#include "kernelCalculater.h"
#include "kernelCalGPUHelper.h"
#include "../my_assert.h"
/*
* @brief: compute a certain # of rows of the Hessian Matrix by Polynomial function
* @param: pfDevSamples: a device pointer to the whole samples. These samples indicate which rows are computed in this round
* @param: pfDevTransSamples: a device pointer to the whole samples with transposition
* @param: pfdevHessianRows: a device pointer to a certain # of Hessian Matrix rows to be computed
* @param: nNumofSamples: indicates the length of pfDevTransSamples
* @param: nNumofRows: indicates the length of pfDevSamples
*/
bool CPolynomialKernel::ComputeHessianRows(float_point *pfDevSamples, float_point *pfDevTransSamples, float_point *pfDevHessianRows,
const int &nNumofSamples, const int &nNumofDim,
const int &nNumofRows, const int &nStartRow)
{
bool bReturn = true;
int nBlockSize = 0;
dim3 dimGrid;
GetGPUSpec(dimGrid, nBlockSize, nNumofSamples, nNumofRows);
assert(nBlockSize >= 0);
hipLaunchKernelGGL(( PolynomialKernel), dim3(dimGrid), dim3(nBlockSize), nBlockSize * sizeof(float_point), 0, pfDevSamples,
pfDevTransSamples, pfDevHessianRows, nNumofSamples, nNumofDim, nStartRow, m_fDegree);
hipDeviceSynchronize();
assert(hipGetLastError() == hipSuccess);
return bReturn;
}
| 75437c17ad069b3f68e373f1ca4232c43cff5548.cu | /*
* PolynomialCalulater.cu
*
* Created on: 28/05/2013
* Author: Zeyi Wen
* Copyright @DBGroup University of Melbourne
**/
#include "kernelCalculater.h"
#include "kernelCalGPUHelper.h"
#include "../my_assert.h"
/*
* @brief: compute a certain # of rows of the Hessian Matrix by Polynomial function
* @param: pfDevSamples: a device pointer to the whole samples. These samples indicate which rows are computed in this round
* @param: pfDevTransSamples: a device pointer to the whole samples with transposition
* @param: pfdevHessianRows: a device pointer to a certain # of Hessian Matrix rows to be computed
* @param: nNumofSamples: indicates the length of pfDevTransSamples
* @param: nNumofRows: indicates the length of pfDevSamples
*/
bool CPolynomialKernel::ComputeHessianRows(float_point *pfDevSamples, float_point *pfDevTransSamples, float_point *pfDevHessianRows,
const int &nNumofSamples, const int &nNumofDim,
const int &nNumofRows, const int &nStartRow)
{
bool bReturn = true;
int nBlockSize = 0;
dim3 dimGrid;
GetGPUSpec(dimGrid, nBlockSize, nNumofSamples, nNumofRows);
assert(nBlockSize >= 0);
PolynomialKernel<<<dimGrid, nBlockSize, nBlockSize * sizeof(float_point)>>>(pfDevSamples,
pfDevTransSamples, pfDevHessianRows, nNumofSamples, nNumofDim, nStartRow, m_fDegree);
cudaDeviceSynchronize();
assert(cudaGetLastError() == cudaSuccess);
return bReturn;
}
|
5c185cf9a9623649af901f96e5a7c425463af80d.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <algorithm>
#include <mirheo/core/logger.h>
#include <mirheo/core/task_scheduler.h>
#include "../timer.h"
#define private public
using namespace mirheo;
void verifyDep(const std::string& before, const std::string& after,
const std::vector<std::string>& messages)
{
auto itb = std::find(messages.begin(), messages.end(), before);
auto ita = std::find(messages.begin(), messages.end(), after);
ASSERT_NE(itb, messages.end());
ASSERT_NE(ita, messages.end());
ASSERT_LT(itb, ita);
}
TEST(Scheduler, Order)
{
/*
A1,A2 - B -----------
\ \
D1,D2 - E
C - /
\ F
G
*/
TaskScheduler scheduler;
std::vector<std::string> messages;
auto A1 = scheduler.createTask("A1");
auto A2 = scheduler.createTask("A2");
auto B = scheduler.createTask("B");
auto C = scheduler.createTask("C");
auto D1 = scheduler.createTask("D1");
auto D2 = scheduler.createTask("D2");
auto E = scheduler.createTask("E");
auto F = scheduler.createTask("F");
auto G = scheduler.createTask("G");
scheduler.addTask(A1, [&](__UNUSED hipStream_t s){ messages.push_back("a1"); });
scheduler.addTask(A2, [&](__UNUSED hipStream_t s){ messages.push_back("a2"); });
scheduler.addTask(B , [&](__UNUSED hipStream_t s){ messages.push_back("b" ); });
scheduler.addTask(C , [&](__UNUSED hipStream_t s){ messages.push_back("c" ); });
scheduler.addTask(D1, [&](__UNUSED hipStream_t s){ messages.push_back("d1"); });
scheduler.addTask(D2, [&](__UNUSED hipStream_t s){ messages.push_back("d2"); });
scheduler.addTask(E , [&](__UNUSED hipStream_t s){ messages.push_back("e" ); });
scheduler.addTask(F , [&](__UNUSED hipStream_t s){ messages.push_back("f" ); });
scheduler.addTask(G , [&](__UNUSED hipStream_t s){ messages.push_back("g" ); });
scheduler.addDependency(B, {}, {A1, A2});
scheduler.addDependency(D1, {}, {B, C});
scheduler.addDependency(D2, {}, {B, C});
scheduler.addDependency(F, {}, {C});
scheduler.addDependency(E, {}, {D1, D2, B});
scheduler.compile();
scheduler.run();
ASSERT_EQ(messages.size(), 9);
verifyDep("a1", "b", messages);
verifyDep("a2", "b", messages);
verifyDep("b", "d1", messages);
verifyDep("c", "d1", messages);
verifyDep("b", "d2", messages);
verifyDep("c", "d2", messages);
verifyDep("c", "f", messages);
verifyDep("d1", "e", messages);
verifyDep("d2", "e", messages);
verifyDep("b" , "e", messages);
}
TEST(Scheduler, Benchmark)
{
TaskScheduler scheduler;
float a, b, c, d, e, f, g;
a = b = c = d = e = f = g = 0;
auto A1 = scheduler.createTask("A1");
auto A2 = scheduler.createTask("A2");
auto B = scheduler.createTask("B");
auto C = scheduler.createTask("C");
auto D1 = scheduler.createTask("D1");
auto D2 = scheduler.createTask("D2");
auto E = scheduler.createTask("E");
auto F = scheduler.createTask("F");
auto G = scheduler.createTask("G");
scheduler.addTask(C, [&](__UNUSED hipStream_t s){ c++; });
scheduler.addTask(G, [&](__UNUSED hipStream_t s){ g--; });
scheduler.addTask(D1, [&](__UNUSED hipStream_t s){ d+=2; });
scheduler.addTask(A1, [&](__UNUSED hipStream_t s){ a-=3; });
scheduler.addTask(E, [&](__UNUSED hipStream_t s){ e*=1.001; });
scheduler.addTask(A2, [&](__UNUSED hipStream_t s){ a*=0.9999; });
scheduler.addTask(B, [&](__UNUSED hipStream_t s){ b+=5; });
scheduler.addTask(D2, [&](__UNUSED hipStream_t s){ d-=42; });
scheduler.addTask(F, [&](__UNUSED hipStream_t s){ f*=2; });
scheduler.addDependency(B, {}, {A1, A2});
scheduler.addDependency(D1, {}, {B, C});
scheduler.addDependency(D2, {}, {B, C});
scheduler.addDependency(F, {}, {C});
scheduler.addDependency(E, {}, {D1, D2, B});
scheduler.compile();
Timer timer;
timer.start();
int n = 10000;
for (int i=0; i<n; i++)
scheduler.run();
int64_t tm = timer.elapsed();
double tus = (double)tm / (1000.0*n);
fprintf(stderr, "Per run: %f us\n", tus);
EXPECT_LE(tus, 500.0);
}
int main(int argc, char **argv)
{
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
if (provided < MPI_THREAD_MULTIPLE) {
fprintf(stderr, "ERROR: The MPI library does not have full thread support\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
logger.init(MPI_COMM_WORLD, "scheduler.log", 9);
testing::InitGoogleTest(&argc, argv);
auto ret = RUN_ALL_TESTS();
MPI_Finalize();
return ret;
}
| 5c185cf9a9623649af901f96e5a7c425463af80d.cu | #include <gtest/gtest.h>
#include <string>
#include <vector>
#include <algorithm>
#include <mirheo/core/logger.h>
#include <mirheo/core/task_scheduler.h>
#include "../timer.h"
#define private public
using namespace mirheo;
void verifyDep(const std::string& before, const std::string& after,
const std::vector<std::string>& messages)
{
auto itb = std::find(messages.begin(), messages.end(), before);
auto ita = std::find(messages.begin(), messages.end(), after);
ASSERT_NE(itb, messages.end());
ASSERT_NE(ita, messages.end());
ASSERT_LT(itb, ita);
}
TEST(Scheduler, Order)
{
/*
A1,A2 - B -----------
\ \
D1,D2 - E
C - /
\ F
G
*/
TaskScheduler scheduler;
std::vector<std::string> messages;
auto A1 = scheduler.createTask("A1");
auto A2 = scheduler.createTask("A2");
auto B = scheduler.createTask("B");
auto C = scheduler.createTask("C");
auto D1 = scheduler.createTask("D1");
auto D2 = scheduler.createTask("D2");
auto E = scheduler.createTask("E");
auto F = scheduler.createTask("F");
auto G = scheduler.createTask("G");
scheduler.addTask(A1, [&](__UNUSED cudaStream_t s){ messages.push_back("a1"); });
scheduler.addTask(A2, [&](__UNUSED cudaStream_t s){ messages.push_back("a2"); });
scheduler.addTask(B , [&](__UNUSED cudaStream_t s){ messages.push_back("b" ); });
scheduler.addTask(C , [&](__UNUSED cudaStream_t s){ messages.push_back("c" ); });
scheduler.addTask(D1, [&](__UNUSED cudaStream_t s){ messages.push_back("d1"); });
scheduler.addTask(D2, [&](__UNUSED cudaStream_t s){ messages.push_back("d2"); });
scheduler.addTask(E , [&](__UNUSED cudaStream_t s){ messages.push_back("e" ); });
scheduler.addTask(F , [&](__UNUSED cudaStream_t s){ messages.push_back("f" ); });
scheduler.addTask(G , [&](__UNUSED cudaStream_t s){ messages.push_back("g" ); });
scheduler.addDependency(B, {}, {A1, A2});
scheduler.addDependency(D1, {}, {B, C});
scheduler.addDependency(D2, {}, {B, C});
scheduler.addDependency(F, {}, {C});
scheduler.addDependency(E, {}, {D1, D2, B});
scheduler.compile();
scheduler.run();
ASSERT_EQ(messages.size(), 9);
verifyDep("a1", "b", messages);
verifyDep("a2", "b", messages);
verifyDep("b", "d1", messages);
verifyDep("c", "d1", messages);
verifyDep("b", "d2", messages);
verifyDep("c", "d2", messages);
verifyDep("c", "f", messages);
verifyDep("d1", "e", messages);
verifyDep("d2", "e", messages);
verifyDep("b" , "e", messages);
}
TEST(Scheduler, Benchmark)
{
TaskScheduler scheduler;
float a, b, c, d, e, f, g;
a = b = c = d = e = f = g = 0;
auto A1 = scheduler.createTask("A1");
auto A2 = scheduler.createTask("A2");
auto B = scheduler.createTask("B");
auto C = scheduler.createTask("C");
auto D1 = scheduler.createTask("D1");
auto D2 = scheduler.createTask("D2");
auto E = scheduler.createTask("E");
auto F = scheduler.createTask("F");
auto G = scheduler.createTask("G");
scheduler.addTask(C, [&](__UNUSED cudaStream_t s){ c++; });
scheduler.addTask(G, [&](__UNUSED cudaStream_t s){ g--; });
scheduler.addTask(D1, [&](__UNUSED cudaStream_t s){ d+=2; });
scheduler.addTask(A1, [&](__UNUSED cudaStream_t s){ a-=3; });
scheduler.addTask(E, [&](__UNUSED cudaStream_t s){ e*=1.001; });
scheduler.addTask(A2, [&](__UNUSED cudaStream_t s){ a*=0.9999; });
scheduler.addTask(B, [&](__UNUSED cudaStream_t s){ b+=5; });
scheduler.addTask(D2, [&](__UNUSED cudaStream_t s){ d-=42; });
scheduler.addTask(F, [&](__UNUSED cudaStream_t s){ f*=2; });
scheduler.addDependency(B, {}, {A1, A2});
scheduler.addDependency(D1, {}, {B, C});
scheduler.addDependency(D2, {}, {B, C});
scheduler.addDependency(F, {}, {C});
scheduler.addDependency(E, {}, {D1, D2, B});
scheduler.compile();
Timer timer;
timer.start();
int n = 10000;
for (int i=0; i<n; i++)
scheduler.run();
int64_t tm = timer.elapsed();
double tus = (double)tm / (1000.0*n);
fprintf(stderr, "Per run: %f us\n", tus);
EXPECT_LE(tus, 500.0);
}
int main(int argc, char **argv)
{
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
if (provided < MPI_THREAD_MULTIPLE) {
fprintf(stderr, "ERROR: The MPI library does not have full thread support\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
logger.init(MPI_COMM_WORLD, "scheduler.log", 9);
testing::InitGoogleTest(&argc, argv);
auto ret = RUN_ALL_TESTS();
MPI_Finalize();
return ret;
}
|
7568bb4f8481d5b667184758936fe86c0bc83af0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* nn.cu
* Nearest Neighbor
*
*/
#include <stdio.h>
#include <sys/time.h>
#include <float.h>
#include <vector>
#include "hip/hip_runtime.h"
#ifdef TIMING
#include "timing.h"
struct timeval tv;
struct timeval tv_total_start, tv_total_end;
struct timeval tv_h2d_start, tv_h2d_end;
struct timeval tv_d2h_start, tv_d2h_end;
struct timeval tv_kernel_start, tv_kernel_end;
struct timeval tv_mem_alloc_start, tv_mem_alloc_end;
struct timeval tv_close_start, tv_close_end;
float init_time = 0, mem_alloc_time = 0, h2d_time = 0, kernel_time = 0,
d2h_time = 0, close_time = 0, total_time = 0;
#endif
#define min( a, b ) a > b ? b : a
#define ceilDiv( a, b ) ( a + b - 1 ) / b
#define print( x ) printf( #x ": %lu\n", (unsigned long) x )
#define DEBUG false
#define DEFAULT_THREADS_PER_BLOCK 256
#define MAX_ARGS 10
#define REC_LENGTH 53 // size of a record in db
#define LATITUDE_POS 28 // character position of the latitude value in each record
#define OPEN 10000 // initial value of nearest neighbors
typedef struct latLong
{
float lat;
float lng;
} LatLong;
typedef struct record
{
char recString[REC_LENGTH];
float distance;
} Record;
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations);
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN);
void printUsage();
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d);
/**
* Kernel
* Executed on GPU
* Calculates the Euclidean distance from each record in the database to the target position
*/
__global__ void euclid(LatLong *d_locations, float *d_distances, int numRecords,float lat, float lng)
{
//int globalId = gridDim.x * blockDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
int globalId = blockDim.x * ( gridDim.x * blockIdx.y + blockIdx.x ) + threadIdx.x; // more efficient
LatLong *latLong = d_locations+globalId;
if (globalId < numRecords) {
float *dist=d_distances+globalId;
*dist = (float)sqrt((lat-latLong->lat)*(lat-latLong->lat)+(lng-latLong->lng)*(lng-latLong->lng));
}
}
/**
* This program finds the k-nearest neighbors
**/
int main(int argc, char* argv[])
{
int i=0;
float lat, lng;
int quiet=0,timing=0,platform=0,device=0;
std::vector<Record> records;
std::vector<LatLong> locations;
char filename[100];
int resultsCount=10;
// parse command line
if (parseCommandline(argc, argv, filename,&resultsCount,&lat,&lng,
&quiet, &timing, &platform, &device)) {
printUsage();
return 0;
}
int numRecords = loadData(filename,records,locations);
if (resultsCount > numRecords) resultsCount = numRecords;
//for(i=0;i<numRecords;i++)
// printf("%s, %f, %f\n",(records[i].recString),locations[i].lat,locations[i].lng);
//Pointers to host memory
float *distances;
//Pointers to device memory
LatLong *d_locations;
float *d_distances;
// Scaling calculations - added by Sam Kauffman
hipDeviceProp_t deviceProp;
hipGetDeviceProperties( &deviceProp, 0 );
hipDeviceSynchronize();
unsigned long maxGridX = deviceProp.maxGridSize[0];
unsigned long threadsPerBlock = min( deviceProp.maxThreadsPerBlock, DEFAULT_THREADS_PER_BLOCK );
size_t totalDeviceMemory;
size_t freeDeviceMemory;
hipMemGetInfo( &freeDeviceMemory, &totalDeviceMemory );
hipDeviceSynchronize();
unsigned long usableDeviceMemory = freeDeviceMemory * 85 / 100; // 85% arbitrary throttle to compensate for known CUDA bug
unsigned long maxThreads = usableDeviceMemory / 12; // 4 bytes in 3 vectors per thread
if ( numRecords > maxThreads )
{
fprintf( stderr, "Error: Input too large.\n" );
exit( 1 );
}
unsigned long blocks = ceilDiv( numRecords, threadsPerBlock ); // extra threads will do nothing
unsigned long gridY = ceilDiv( blocks, maxGridX );
unsigned long gridX = ceilDiv( blocks, gridY );
// There will be no more than (gridY - 1) extra blocks
dim3 gridDim( gridX, gridY );
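// Worked example of the scaling above (illustrative numbers only): with blocks = 130933 and maxGridX = 65535,
// gridY = ceilDiv( 130933, 65535 ) = 2 and gridX = ceilDiv( 130933, 2 ) = 65467, so gridX * gridY = 130934
// blocks are launched - 1 extra block, which matches the (gridY - 1) bound noted above; its surplus threads
// fail the globalId < numRecords check in the kernel and simply do nothing.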
if ( DEBUG )
{
print( totalDeviceMemory ); // 804454400
print( freeDeviceMemory );
print( usableDeviceMemory );
print( maxGridX ); // 65535
print( deviceProp.maxThreadsPerBlock ); // 1024
print( threadsPerBlock );
print( maxThreads );
print( blocks ); // 130933
print( gridY );
print( gridX );
}
/**
* Allocate memory on host and device
*/
hipHostMalloc((void **) &distances, sizeof(float) * numRecords, hipHostMallocDefault);
hipMalloc((void **) &d_locations,sizeof(LatLong) * numRecords);
hipMalloc((void **) &d_distances,sizeof(float) * numRecords);
/**
* Transfer data from host to device
*/
hipMemcpy( d_locations, &locations[0], sizeof(LatLong) * numRecords, hipMemcpyHostToDevice);
/**
* Execute kernel
*/
#ifdef TIMING
gettimeofday(&tv_kernel_start, NULL);
#endif
hipLaunchKernelGGL(( euclid), dim3(gridDim), dim3(threadsPerBlock) , 0, 0, d_locations,d_distances,numRecords,lat,lng);
hipDeviceSynchronize();
#ifdef TIMING
gettimeofday(&tv_kernel_end, NULL);
tvsub(&tv_kernel_end, &tv_kernel_start, &tv);
kernel_time += tv.tv_sec * 1000.0 + (float) tv.tv_usec / 1000.0;
#endif
//Copy data from device memory to host memory
hipMemcpy( distances, d_distances, sizeof(float)*numRecords, hipMemcpyDeviceToHost );
// find the resultsCount least distances
findLowest(records,distances,numRecords,resultsCount);
// print out results
if (!quiet)
for(i=0;i<resultsCount;i++) {
printf("%s --> Distance=%f\n",records[i].recString,records[i].distance);
}
hipHostFree(distances);
//Free memory
hipFree(d_locations);
hipFree(d_distances);
#ifdef TIMING
printf("Exec: %f\n", kernel_time);
#endif
}
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations){
FILE *flist,*fp;
int i=0;
char dbname[64];
int recNum=0;
/**Main processing **/
flist = fopen(filename, "r");
while(!feof(flist)) {
/**
* Read in all records of length REC_LENGTH
* If this is the last file in the filelist, then done
* else open next file to be read next iteration
*/
if(fscanf(flist, "%s\n", dbname) != 1) {
fprintf(stderr, "error reading filelist\n");
exit(0);
}
fp = fopen(dbname, "r");
if(!fp) {
printf("error opening a db\n");
exit(1);
}
// read each record
while(!feof(fp)){
Record record;
LatLong latLong;
fgets(record.recString,49,fp);
fgetc(fp); // newline
if (feof(fp)) break;
// parse for lat and long
char substr[6];
for(i=0;i<5;i++) substr[i] = *(record.recString+i+28);
substr[5] = '\0';
latLong.lat = atof(substr);
for(i=0;i<5;i++) substr[i] = *(record.recString+i+33);
substr[5] = '\0';
latLong.lng = atof(substr);
locations.push_back(latLong);
records.push_back(record);
recNum++;
}
fclose(fp);
}
fclose(flist);
// for(i=0;i<rec_count*REC_LENGTH;i++) printf("%c",sandbox[i]);
return recNum;
}
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN){
int i,j;
float val;
int minLoc;
Record tempRec;
float tempDist;
for(i=0;i<topN;i++) {
minLoc = i;
for(j=i;j<numRecords;j++) {
val = distances[j];
if (val < distances[minLoc]) minLoc = j;
}
// swap records and distances (copy the record by value so the original at index i is not lost)
tempRec = records[i];
records[i] = records[minLoc];
records[minLoc] = tempRec;
tempDist = distances[i];
distances[i] = distances[minLoc];
distances[minLoc] = tempDist;
// add distance to the min we just found
records[i].distance = distances[i];
}
}
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d){
int i;
if (argc < 2) return 1; // error
strncpy(filename,argv[1],100);
char flag;
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 'r': // number of results
i++;
*r = atoi(argv[i]);
break;
case 'l': // lat or lng
if (argv[i][2]=='a') {//lat
*lat = atof(argv[i+1]);
}
else {//lng
*lng = atof(argv[i+1]);
}
i++;
break;
case 'h': // help
return 1;
case 'q': // quiet
*q = 1;
break;
case 't': // timing
*t = 1;
break;
case 'p': // platform
i++;
*p = atoi(argv[i]);
break;
case 'd': // device
i++;
*d = atoi(argv[i]);
break;
}
}
}
if ((*d >= 0 && *p<0) || (*p>=0 && *d<0)) // both p and d must be specified if either are specified
return 1;
return 0;
}
void printUsage(){
printf("Nearest Neighbor Usage\n");
printf("\n");
printf("nearestNeighbor [filename] -r [int] -lat [float] -lng [float] [-hqt] [-p [int] -d [int]]\n");
printf("\n");
printf("example:\n");
printf("$ ./nearestNeighbor filelist.txt -r 5 -lat 30 -lng 90\n");
printf("\n");
printf("filename the filename that lists the data input files\n");
printf("-r [int] the number of records to return (default: 10)\n");
printf("-lat [float] the latitude for nearest neighbors (default: 0)\n");
printf("-lng [float] the longitude for nearest neighbors (default: 0)\n");
printf("\n");
printf("-h, --help Display the help file\n");
printf("-q Quiet mode. Suppress all text output.\n");
printf("-t Print timing information.\n");
printf("\n");
printf("-p [int] Choose the platform (must choose both platform and device)\n");
printf("-d [int] Choose the device (must choose both platform and device)\n");
printf("\n");
printf("\n");
printf("Notes: 1. The filename is required as the first parameter.\n");
printf(" 2. If you declare either the device or the platform,\n");
printf(" you must declare both.\n\n");
}
| 7568bb4f8481d5b667184758936fe86c0bc83af0.cu | /*
* nn.cu
* Nearest Neighbor
*
*/
#include <stdio.h>
#include <sys/time.h>
#include <float.h>
#include <vector>
#include "cuda.h"
#ifdef TIMING
#include "timing.h"
struct timeval tv;
struct timeval tv_total_start, tv_total_end;
struct timeval tv_h2d_start, tv_h2d_end;
struct timeval tv_d2h_start, tv_d2h_end;
struct timeval tv_kernel_start, tv_kernel_end;
struct timeval tv_mem_alloc_start, tv_mem_alloc_end;
struct timeval tv_close_start, tv_close_end;
float init_time = 0, mem_alloc_time = 0, h2d_time = 0, kernel_time = 0,
d2h_time = 0, close_time = 0, total_time = 0;
#endif
#define min( a, b ) a > b ? b : a
#define ceilDiv( a, b ) ( a + b - 1 ) / b
#define print( x ) printf( #x ": %lu\n", (unsigned long) x )
#define DEBUG false
#define DEFAULT_THREADS_PER_BLOCK 256
#define MAX_ARGS 10
#define REC_LENGTH 53 // size of a record in db
#define LATITUDE_POS 28 // character position of the latitude value in each record
#define OPEN 10000 // initial value of nearest neighbors
typedef struct latLong
{
float lat;
float lng;
} LatLong;
typedef struct record
{
char recString[REC_LENGTH];
float distance;
} Record;
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations);
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN);
void printUsage();
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d);
/**
* Kernel
* Executed on GPU
* Calculates the Euclidean distance from each record in the database to the target position
*/
__global__ void euclid(LatLong *d_locations, float *d_distances, int numRecords,float lat, float lng)
{
//int globalId = gridDim.x * blockDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
int globalId = blockDim.x * ( gridDim.x * blockIdx.y + blockIdx.x ) + threadIdx.x; // more efficient
LatLong *latLong = d_locations+globalId;
if (globalId < numRecords) {
float *dist=d_distances+globalId;
*dist = (float)sqrt((lat-latLong->lat)*(lat-latLong->lat)+(lng-latLong->lng)*(lng-latLong->lng));
}
}
/**
* This program finds the k-nearest neighbors
**/
int main(int argc, char* argv[])
{
int i=0;
float lat, lng;
int quiet=0,timing=0,platform=0,device=0;
std::vector<Record> records;
std::vector<LatLong> locations;
char filename[100];
int resultsCount=10;
// parse command line
if (parseCommandline(argc, argv, filename,&resultsCount,&lat,&lng,
&quiet, &timing, &platform, &device)) {
printUsage();
return 0;
}
int numRecords = loadData(filename,records,locations);
if (resultsCount > numRecords) resultsCount = numRecords;
//for(i=0;i<numRecords;i++)
// printf("%s, %f, %f\n",(records[i].recString),locations[i].lat,locations[i].lng);
//Pointers to host memory
float *distances;
//Pointers to device memory
LatLong *d_locations;
float *d_distances;
// Scaling calculations - added by Sam Kauffman
cudaDeviceProp deviceProp;
cudaGetDeviceProperties( &deviceProp, 0 );
cudaThreadSynchronize();
unsigned long maxGridX = deviceProp.maxGridSize[0];
unsigned long threadsPerBlock = min( deviceProp.maxThreadsPerBlock, DEFAULT_THREADS_PER_BLOCK );
size_t totalDeviceMemory;
size_t freeDeviceMemory;
cudaMemGetInfo( &freeDeviceMemory, &totalDeviceMemory );
cudaThreadSynchronize();
unsigned long usableDeviceMemory = freeDeviceMemory * 85 / 100; // 85% arbitrary throttle to compensate for known CUDA bug
unsigned long maxThreads = usableDeviceMemory / 12; // 4 bytes in 3 vectors per thread
if ( numRecords > maxThreads )
{
fprintf( stderr, "Error: Input too large.\n" );
exit( 1 );
}
unsigned long blocks = ceilDiv( numRecords, threadsPerBlock ); // extra threads will do nothing
unsigned long gridY = ceilDiv( blocks, maxGridX );
unsigned long gridX = ceilDiv( blocks, gridY );
// There will be no more than (gridY - 1) extra blocks
dim3 gridDim( gridX, gridY );
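// Worked example of the scaling above (illustrative numbers only): with blocks = 130933 and maxGridX = 65535,
// gridY = ceilDiv( 130933, 65535 ) = 2 and gridX = ceilDiv( 130933, 2 ) = 65467, so gridX * gridY = 130934
// blocks are launched - 1 extra block, which matches the (gridY - 1) bound noted above; its surplus threads
// fail the globalId < numRecords check in the kernel and simply do nothing.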
if ( DEBUG )
{
print( totalDeviceMemory ); // 804454400
print( freeDeviceMemory );
print( usableDeviceMemory );
print( maxGridX ); // 65535
print( deviceProp.maxThreadsPerBlock ); // 1024
print( threadsPerBlock );
print( maxThreads );
print( blocks ); // 130933
print( gridY );
print( gridX );
}
/**
* Allocate memory on host and device
*/
cudaHostAlloc((void **) &distances, sizeof(float) * numRecords, cudaHostAllocDefault);
cudaMalloc((void **) &d_locations,sizeof(LatLong) * numRecords);
cudaMalloc((void **) &d_distances,sizeof(float) * numRecords);
/**
* Transfer data from host to device
*/
cudaMemcpy( d_locations, &locations[0], sizeof(LatLong) * numRecords, cudaMemcpyHostToDevice);
/**
* Execute kernel
*/
#ifdef TIMING
gettimeofday(&tv_kernel_start, NULL);
#endif
euclid<<< gridDim, threadsPerBlock >>>(d_locations,d_distances,numRecords,lat,lng);
cudaThreadSynchronize();
#ifdef TIMING
gettimeofday(&tv_kernel_end, NULL);
tvsub(&tv_kernel_end, &tv_kernel_start, &tv);
kernel_time += tv.tv_sec * 1000.0 + (float) tv.tv_usec / 1000.0;
#endif
//Copy data from device memory to host memory
cudaMemcpy( distances, d_distances, sizeof(float)*numRecords, cudaMemcpyDeviceToHost );
// find the resultsCount least distances
findLowest(records,distances,numRecords,resultsCount);
// print out results
if (!quiet)
for(i=0;i<resultsCount;i++) {
printf("%s --> Distance=%f\n",records[i].recString,records[i].distance);
}
cudaFreeHost(distances);
//Free memory
cudaFree(d_locations);
cudaFree(d_distances);
#ifdef TIMING
printf("Exec: %f\n", kernel_time);
#endif
}
int loadData(char *filename,std::vector<Record> &records,std::vector<LatLong> &locations){
FILE *flist,*fp;
int i=0;
char dbname[64];
int recNum=0;
/**Main processing **/
flist = fopen(filename, "r");
while(!feof(flist)) {
/**
* Read in all records of length REC_LENGTH
* If this is the last file in the filelist, then done
* else open next file to be read next iteration
*/
if(fscanf(flist, "%s\n", dbname) != 1) {
fprintf(stderr, "error reading filelist\n");
exit(0);
}
fp = fopen(dbname, "r");
if(!fp) {
printf("error opening a db\n");
exit(1);
}
// read each record
while(!feof(fp)){
Record record;
LatLong latLong;
fgets(record.recString,49,fp);
fgetc(fp); // newline
if (feof(fp)) break;
// parse for lat and long
char substr[6];
for(i=0;i<5;i++) substr[i] = *(record.recString+i+28);
substr[5] = '\0';
latLong.lat = atof(substr);
for(i=0;i<5;i++) substr[i] = *(record.recString+i+33);
substr[5] = '\0';
latLong.lng = atof(substr);
locations.push_back(latLong);
records.push_back(record);
recNum++;
}
fclose(fp);
}
fclose(flist);
// for(i=0;i<rec_count*REC_LENGTH;i++) printf("%c",sandbox[i]);
return recNum;
}
void findLowest(std::vector<Record> &records,float *distances,int numRecords,int topN){
int i,j;
float val;
int minLoc;
Record tempRec;
float tempDist;
for(i=0;i<topN;i++) {
minLoc = i;
for(j=i;j<numRecords;j++) {
val = distances[j];
if (val < distances[minLoc]) minLoc = j;
}
// swap records and distances (copy the record by value so the original at index i is not lost)
tempRec = records[i];
records[i] = records[minLoc];
records[minLoc] = tempRec;
tempDist = distances[i];
distances[i] = distances[minLoc];
distances[minLoc] = tempDist;
// add distance to the min we just found
records[i].distance = distances[i];
}
}
int parseCommandline(int argc, char *argv[], char* filename,int *r,float *lat,float *lng,
int *q, int *t, int *p, int *d){
int i;
if (argc < 2) return 1; // error
strncpy(filename,argv[1],100);
char flag;
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 'r': // number of results
i++;
*r = atoi(argv[i]);
break;
case 'l': // lat or lng
if (argv[i][2]=='a') {//lat
*lat = atof(argv[i+1]);
}
else {//lng
*lng = atof(argv[i+1]);
}
i++;
break;
case 'h': // help
return 1;
case 'q': // quiet
*q = 1;
break;
case 't': // timing
*t = 1;
break;
case 'p': // platform
i++;
*p = atoi(argv[i]);
break;
case 'd': // device
i++;
*d = atoi(argv[i]);
break;
}
}
}
if ((*d >= 0 && *p<0) || (*p>=0 && *d<0)) // both p and d must be specified if either are specified
return 1;
return 0;
}
void printUsage(){
printf("Nearest Neighbor Usage\n");
printf("\n");
printf("nearestNeighbor [filename] -r [int] -lat [float] -lng [float] [-hqt] [-p [int] -d [int]]\n");
printf("\n");
printf("example:\n");
printf("$ ./nearestNeighbor filelist.txt -r 5 -lat 30 -lng 90\n");
printf("\n");
printf("filename the filename that lists the data input files\n");
printf("-r [int] the number of records to return (default: 10)\n");
printf("-lat [float] the latitude for nearest neighbors (default: 0)\n");
printf("-lng [float] the longitude for nearest neighbors (default: 0)\n");
printf("\n");
printf("-h, --help Display the help file\n");
printf("-q Quiet mode. Suppress all text output.\n");
printf("-t Print timing information.\n");
printf("\n");
printf("-p [int] Choose the platform (must choose both platform and device)\n");
printf("-d [int] Choose the device (must choose both platform and device)\n");
printf("\n");
printf("\n");
printf("Notes: 1. The filename is required as the first parameter.\n");
printf(" 2. If you declare either the device or the platform,\n");
printf(" you must declare both.\n\n");
}
|
1fe6a9791bc9b7437cf110dbfd9cb441344e9f38.hip | // !!! This is a file automatically generated by hipify!!!
/* Assignment 2 Program
Building upon the code written for assignment 1, this program implements GPU code for N-body simulation and visualization.
In total, we implement a serial CPU version, an OpenMP version for multicore processors, and a CUDA version for Nvidia GPUs.
Timing code is also included for benchmarking performance.
The accompanying report provides discussion on design considerations regarding performance optimization and validation. */
/* Problem Description
We consider a system of N bodies in frictionless 2D space exerting gravitational force on each other.
See https://en.wikipedia.org/wiki/N-body_problem for further background on the physics of the N-body problem.
We simulate the progression of such an N-body system through time using numerical integration by evaluating all pairwise
gravitational interactions between bodies in the system. The force `F_{ij}` of gravity on a body `i` exerted by a body `j`
can be calculated through the following formula: `F_{ij} = G*m_{i}*m_{j}*r_{ji}/|r_{ji}|^{3}` where `G` is the gravitational
constant, `m` denotes the mass of a body, and `r_{ji}` denotes the displacement vector from `i` towards `j`.
This is known as [Newton's Law of Universal Gravitation](https://en.wikipedia.org/wiki/Newton%27s_law_of_universal_gravitation)
We add a softening factor `eps` to the denominator to avoid the force between two approaching bodies growing without bound.
This replaces `|r_{ji}|` with `sqrt(|r_{ji}|^{2} + eps^{2})` in the expression in the denominator.
At each time `t_{k}` we calculate the resultant (sum total) force `F_{i;k}` on each body and use this to calculate
acceleration `a_{i;k}`, then use the [Forward Euler method](https://en.wikipedia.org/wiki/Euler_method) to update the
velocity and position at time `t_{k+1} = t_{k} + dt` based on `a_{i;k}`, `v_{i;k}`, respectively, where `dt` is the time step. */
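/* Illustrative sketch of a single Forward Euler update for one body `i` (a sketch only - the implementation below
   writes new positions to separate output buffers and runs one GPU thread per body). Once the resultant
   acceleration components `ax`, `ay` have been accumulated as `G * sum_j m_{j}*(x_{j}-x_{i}) / (|r_{ji}|^2 + eps^2)^(3/2)`
   (and similarly for `y`), the update at time `t_{k}` is:
       x_i += dt * vx_i;   y_i += dt * vy_i;    // position advanced with the current velocity v_{i;k}
       vx_i += dt * ax;    vy_i += dt * ay;     // velocity then advanced with the current acceleration a_{i;k}
   When updating in place, the position must be advanced before the velocity is overwritten. */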
/* C Language Library headers */
#include <stdio.h> // http://www.cplusplus.com/reference/cstdio/
#include <stdlib.h> // http://www.cplusplus.com/reference/cstdlib/
#include <string.h> // http://www.cplusplus.com/reference/cstring/
#include <ctype.h> // http://www.cplusplus.com/reference/cctype/
#include <time.h> // http://www.cplusplus.com/reference/ctime/
#include <math.h> // http://www.cplusplus.com/reference/cmath/
/* To enable OpenMP support in your project you will need to include the OpenMP header file `omp.h`
and enable the compiler to use the OpenMP runtime.
Set 'OpenMP Support' to 'Yes' (for both Debug and Release builds) in Project->Properties->C/C++->Language
Add `_CRT_SECURE_NO_WARNINGS` to 'Preprocessor Definitions' in Project->Properties->C/C++->Preprocessor */
#include <omp.h>
#include <hip/hip_runtime.h>
/* Local header files */
#include "NBody.h"
#include "NBodyVisualiser.h"
/* Preprocessor definitions/macros */
#define USER_NAME "smp16emp" // Replace with your username
#define BUFFER_SIZE 128 // Maximum line length accepted from input file (reasonable as only 5 (comma separated) floating point numbers expected)
#define THREADS_PER_BLOCK 256
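/* Sketch of how THREADS_PER_BLOCK would typically feed into the kernel launch (illustrative only - the actual
   launch is issued from step_CUDA, defined later, and may differ; the pointers named here are the globals
   declared further down):
       unsigned int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;   // round up so every body gets a thread
       simulation_kernel<<<blocks, THREADS_PER_BLOCK>>>(d_nbodies, out_x, out_y, activity_map, N, D);
   Leftover threads in the last block fail the `i < N` guard inside the kernel and stay idle. */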
/* Function declarations/prototypes */
void print_help();
void parseNDM(const char* argv[3]);
void parse_one_option(const char* argv[2]);
void parse_two_options(const char* argv[4]);
unsigned int parse_str_as_uint(const char* str);
void read_nbody_file(const char* filename, const int N);
void checkLastError(const char* msg);
void step_serial(void);
void step_OpenMP(void);
void step_CUDA(void);
void swap_float_pointers(float** p1, float** p2);
/* Global variables (shared by/used in multiple functions) */
/* Command line inputs */
unsigned int N; // Number of bodies in the system
unsigned int D; // Dimension of the activity grid
MODE M; // Operation mode. Allows CPU = 0, OPENMP = 1, CUDA = 2
unsigned int I = 0; // Number of iterations of the simulation to calculate when the `-i` flag is set, else 0
unsigned int f_flag = 0; // Input file flag. 0 if not specified, else such that `input_filename = options[f_flag]` in `main`.
/* Data buffers */
nbody_soa* h_nbodies; // Pointer to a structure of arrays (preferred over an array of structures for coalesced memory access)
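/* Rough illustration of the SoA-vs-AoS trade-off (the real definition of nbody_soa lives in NBody.h and its field
   names may differ from this sketch):
       struct nbody_soa { float *x, *y, *vx, *vy, *m; };  // SoA: thread i reads x[i], so adjacent threads touch adjacent floats (coalesced)
       struct nbody_aos { float x, y, vx, vy, m; };       // AoS: thread i reads bodies[i].x, so accesses are strided by sizeof(nbody_aos)
*/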
/* Separate output buffers for updated particle positions are required to avoid interference between loop iterations/threads
when calculating forces based on current particle positions. Buffers for output velocity components are not required
because a given particle's velocity is only used to calculate its own new position and nothing else. However this requires
each particle's new position be calculated first before its velocity is updated in-place.
Pointer swapping can be used to reduce memory copying between multiple buffers when updating system state.
See https://en.wikipedia.org/wiki/Multiple_buffering for more on double/multiple buffering
The visualiser only (re)reads position data once after each time the simulation `step` function completes,
rather than throughout the whole `step` calculation process, so the particles update positions in sync anyway */
/* Whether the following three pointers are host pointers or device pointers will depend on the operation mode */
float* out_x; // Pointer to store the new `x` coordinate of each body before updating in sync after loops complete
float* out_y; // Pointer to store the new `y` coordinate of each body before updating in sync after loops complete
float* activity_map; // Pointer to flattened array of D*D float values storing normalised particle density values in a 2D grid
/* Device pointers */
nbody_soa* d_nbodies; // Device pointer for nbody data (points to a device-side copy of the struct of device buffer pointers)
nbody_soa h_d_nbodies; // Host-side copy of the device buffer pointers; a device pointer such as `d_nbodies` cannot be dereferenced in host code
/* Helper Functions, Device Functions and Kernels */
void swap_float_pointers(float** p1, float** p2) { // Called from host code in `step_CUDA`, so this is a host function
// Function arguments are always passed by value, so to swap two pointers, we must pass references to those pointers
// The arguments `p1` and `p2` are actually addresses of pointers to `float` data (rather than the pointers themselves)
float* temp = *p1; // Set `temp` to be the pointer referenced by p1
*p1 = *p2; // Overwrite the pointer addressed by `p1` with the pointer addressed by `p2`
*p2 = temp; // Overwrite the pointer addressed by `p2` with the pointer addressed by `temp` (originally addressed by `p1`)
}
__global__ void simulation_kernel(nbody_soa * nbody_in, float * new_x, float * new_y, float * activity, const unsigned int N, const unsigned int D) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // Iterating over bodies in the Nbody system, one thread per body
if (i < N) { // One unique index for each body and any leftover threads stay idle
float ax = 0, ay = 0; // Initialise resultant acceleration to zero
// Read position data from global/constant/texture memory to thread-local stack variables
float local_xi = nbody_in->x[i];
float local_yi = nbody_in->y[i];
// Calculate the acceleration of body `i` due to gravitational force from the other bodies
for (unsigned int j = 0; j < N; j++) {
if (j == i) { // Skip the calculation when i = j
continue;
}
// Calculate displacement from particle `i` to particle `j`, since common expression in force equation
float x_ji = nbody_in->x[j] - local_xi;
float y_ji = nbody_in->y[j] - local_yi;
// Calculate distance from `i` to `j` with softening factor since used in denominator of force expression
// Single precision square root: https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html
float dist_ij = sqrtf(x_ji * x_ji + y_ji * y_ji + eps_sq);
/* Add unscaled contribution to acceleration due to gravitational force of `j` on `i`
Universal Gravitation: `F_ij = G * m_i * m_j * r_ji / |r_ji|^3` ; Newton's 2nd Law: F_i = m_i * a_i */
ax += nbody_in->m[j] * x_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
ay += nbody_in->m[j] * y_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
/* It would be possible to add force/acceleration contributions to `d_nbodies->v` directly within this inner loop.
However this would cause this function to be bound by memory access latency (repeated writes to `d_nbodies->v`).
Therefore we use the temporary/local variables `ax` and `ay` instead */
}
/* Use current velocity, acceleration to calculate position, velocity at next time step, respectively. */
float local_vxi = nbody_in->vx[i];
float local_vyi = nbody_in->vy[i];
// Care has to be taken about the order of execution to ensure the output positions are calculated correctly
// Use current velocity to calculate next position
local_xi += local_vxi * dt;
local_yi += local_vyi * dt;
// Now the local position variables hold the new positions and can be used to update the activity map
// Use current acceleration (based on current positions) to calculate the new velocity
// Scale `ax`, `ay` by gravitational constant `G`. See `NBody.h` for definition and comment.
nbody_in->vx[i] = local_vxi + G * ax * dt; // Write the new velocity back to `d_nbodies->vx[i]`
nbody_in->vy[i] = local_vyi + G * ay * dt; // Write the new velocity back to `d_nbodies->vy[i]`
// We can update particle velocities in-place without adversely affecting subsequent iterations/other threads
// Write the new position of particle `i` to the output buffers to avoid interfering with other threads
new_x[i] = local_xi;
new_y[i] = local_yi;
// Pointer swapping of position data occurs outside of the kernel launch in the `step_CUDA` function
// Update the activity map - a flat array of D*D float values storing normalised particle density values in a 2D grid
// First check whether the new position of particle `i` is within the activity grid [0,1)^{2}. Branching thread logic.
if ((local_xi >= 0) && (local_xi < 1) && (local_yi >= 0) && (local_yi < 1)) {
// If so, calculate the index of the grid element that particle `i` is in
// Multiply position vector by `D` then truncate components to `int` to find position in \{0,...,D-1\}^{2} grid
unsigned int index = D * (int)(D * local_yi) + (int)(D * local_xi); // Linearize the index from 2D grid into 1D array
// Increase the associated histogram bin by the normalised quantity `D/N` (scaling by D to increase brightness)
			// Multiple threads may update the same bin simultaneously, so `atomicAdd` is used to avoid a race condition
			atomicAdd(&activity[index], (float)D / N);
// Unfortunately this is a random access (write) to global memory and cannot easily be coalesced
/* We choose not to reduce the number of multiplication/division operations by incrementing the histogram bin by one
at this step and then scaling the histogram counts in a separate loop (as in the other implementations) in order to
			avoid launching a separate grid/kernel with D^2 threads, thus reducing the number of kernel launches */
}
}
}
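/* For illustration only: the alternative described in the comment above would increment each bin by one inside
   `simulation_kernel` and then normalise in a second kernel launched with at least D*D threads. A minimal sketch of that
   second kernel is given below; the kernel name is an assumption for illustration and this program never launches it. */
__global__ void normalise_activity_kernel(float* activity, const unsigned int N, const unsigned int D) {
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // One thread per activity grid cell
	if (i < D * D) {
		activity[i] *= (float)D / N; // Normalise the count by N and scale by D to increase brightness
	}
}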
/* For information on how to parse command line parameters, see http://www.cplusplus.com/articles/DEN36Up4/
`argc` is the count of the command arguments, and `argv` is an array (of length `argc`) of the arguments.
The first argument is always the executable name (including path) */
int main(const int argc, const char *argv[]) {
/* Process the command line arguments */
switch (argc) {
case 4: // No optional flags used
parseNDM(&argv[1]);
break;
case 6: // One optional flag and argument used
parse_one_option(&argv[4]);
parseNDM(&argv[1]);
break;
case 8: // Two optional flags with arguments used
parse_two_options(&argv[4]);
parseNDM(&argv[1]);
break;
default: // The expected arguments are: "nbody.exe N D M [-i I] [-f input_file]"
fprintf(stderr, "Error: Unexpected number of arguments. %d arguments (including executable name) received\n", argc);
print_help();
exit(EXIT_FAILURE);
}
// Declare a function pointer to a simulation step function and set its value according to the operation mode `M`
void (*simulate)(void) = NULL; // Declare `simulate` as pointer to function (void) returning void
switch (M) {
case CPU:
simulate = &step_serial;
break;
case OPENMP:
simulate = &step_OpenMP;
printf("OpenMP using %d threads\n", omp_get_max_threads());
break;
case CUDA:
simulate = &step_CUDA;
break;
}
/* Allocate Heap Memory */
// Calculate memory requirements
const unsigned int data_column_size = sizeof(float) * N;
const unsigned int activity_grid_size = sizeof(float) * D * D;
// Memory allocation. See http://www.cplusplus.com/reference/cstdlib/malloc/
	h_nbodies = (nbody_soa*)malloc(sizeof(nbody_soa));
	if (h_nbodies == NULL) { // Check the struct allocation succeeded before dereferencing it to allocate the member arrays
		fprintf(stderr, "Error allocating host memory (`h_nbodies`) for system with %d bodies\n", N);
		exit(EXIT_FAILURE);
	}
	h_nbodies->x = (float*)malloc(data_column_size);
	h_nbodies->y = (float*)malloc(data_column_size);
	// Allocates memory block for length N array of floats, and initialize all bits to zero (for default zero initial velocity).
	// See http://www.cplusplus.com/reference/cstdlib/calloc/
	h_nbodies->vx = (float*)calloc(N, sizeof(float)); // Zero initial velocity
	h_nbodies->vy = (float*)calloc(N, sizeof(float)); // Zero initial velocity
	h_nbodies->m = (float*)malloc(data_column_size);
	if ((h_nbodies->x == NULL) || (h_nbodies->y == NULL) || (h_nbodies->vx == NULL) || (h_nbodies->vy == NULL) || (h_nbodies->m == NULL)) {
fprintf(stderr, "Error allocating host memory (`h_nbodies`) for system with %d bodies\n", N);
exit(EXIT_FAILURE);
}
	if (M == CUDA) {
		/* Allocate device memory. The member buffers are allocated via the host-side struct `h_d_nbodies`, which is then
		   copied to `d_nbodies`, because a device pointer like `d_nbodies` cannot be dereferenced on the host. */
		hipMalloc((void**)&d_nbodies, sizeof(nbody_soa));
		hipMalloc((void**)&h_d_nbodies.x, data_column_size);
		hipMalloc((void**)&h_d_nbodies.y, data_column_size);
		hipMalloc((void**)&h_d_nbodies.vx, data_column_size);
		hipMalloc((void**)&h_d_nbodies.vy, data_column_size);
		hipMalloc((void**)&h_d_nbodies.m, data_column_size);
		hipMalloc((void**)&out_x, data_column_size);
		hipMalloc((void**)&out_y, data_column_size);
		hipMalloc((void**)&activity_map, activity_grid_size);
		hipMemcpy(d_nbodies, &h_d_nbodies, sizeof(nbody_soa), hipMemcpyHostToDevice); // Publish the device buffer pointers to the device copy of the struct
		checkCUDAError("Memory allocation on device with hipMalloc");
	}
else { // Whether `out_x`, `out_y`, and `activity_map` are pointers on the host or device depends on operation mode
out_x = (float*)malloc(data_column_size);
out_y = (float*)malloc(data_column_size);
if ((out_x == NULL) || (out_y == NULL)) {
fprintf(stderr, "Error allocating host memory (output position buffers) for system with %d bodies\n", N);
exit(EXIT_FAILURE);
}
activity_map = (float*)malloc(activity_grid_size);
if (activity_map == NULL) {
fprintf(stderr, "Error allocating host memory (`activity map`) for system with %d bodies, activity grid size %d\n", N, D);
exit(EXIT_FAILURE);
}
}
/* Read initial data from file to host memory, or generate random initial state according to optional program flag `-f`. */
if (f_flag == 0) { // No input file specified, so a random initial N-body state will be generated
const float one_over_N = (float)1 / N; // Store the inverse of `N` as a constant to avoid recalculating in loop
for (unsigned int i = 0; i < N; i++) {
h_nbodies->x[i] = (float)rand() / RAND_MAX; // Random position in [0,1]
h_nbodies->y[i] = (float)rand() / RAND_MAX; // Random position in [0,1]
h_nbodies->m[i] = one_over_N; // Mass distributed equally among N bodies
// Note that velocity data has already been initialized to zero for all bodies
}
}
else { // Attempt to read initial N-body system state from input csv file to host memory
read_nbody_file(argv[f_flag], N);
}
	if (M == CUDA) {
		/* Copy the host input values in `h_nbodies` to the device buffers referenced by `h_d_nbodies` (and hence `d_nbodies`). */
		hipMemcpy(h_d_nbodies.x, h_nbodies->x, data_column_size, hipMemcpyHostToDevice);
		hipMemcpy(h_d_nbodies.y, h_nbodies->y, data_column_size, hipMemcpyHostToDevice);
		hipMemcpy(h_d_nbodies.vx, h_nbodies->vx, data_column_size, hipMemcpyHostToDevice);
		hipMemcpy(h_d_nbodies.vy, h_nbodies->vy, data_column_size, hipMemcpyHostToDevice);
		hipMemcpy(h_d_nbodies.m, h_nbodies->m, data_column_size, hipMemcpyHostToDevice);
		checkCUDAError("Input transfer to device");
	}
/* According to the value of program argument `I` either configure and start the visualiser,
or perform a fixed number of simulation steps and output the timing results. */
if (I == 0) { // Run visualiser when number of iterations not specified with `-i` flag, or otherwise `I` was set to 0
initViewer(N, D, M, simulate); // The simulation step function has been set earlier according to operation mode `M`
// Set where the visualiser will check for particle position data after each iteration
if (M == CUDA) {
setNBodyPositions(d_nbodies); // Device pointer
}
else {
setNBodyPositions(h_nbodies); // Host pointer
}
setActivityMapData(activity_map); // This is where the visualiser will check for activity data after each iteration
startVisualisationLoop();
}
else { // Run the simulation for `I` iterations and output the timing results
		switch (M) { // Simulation and timing methods depend on operation mode
		case CPU: { // Each case gets its own block scope so the timing variables below can be declared per mode without clashing
			clock_t t; // Clock ticks for serial CPU timing
			double seconds = 0; // Variable to hold execution timing results
			t = clock(); // Starting timestamp. See http://www.cplusplus.com/reference/ctime/clock/
for (unsigned int i = 0; i < I; i++) {
step_serial();
}
t = clock() - t; // Take end timestamp and calculate difference from start in clock ticks
seconds = (double)t / CLOCKS_PER_SEC;
printf("Execution time %d seconds %d milliseconds for %d iterations\n", (int)seconds, (int)((seconds - (int)seconds) * 1000), I);
			break;
		}
		case OPENMP: {
			double start, end; // Timestamps for OpenMP timing
			double seconds = 0; // Variable to hold execution timing results
			start = omp_get_wtime(); // Starting timestamp. See https://www.openmp.org/spec-html/5.0/openmpsu160.html
for (unsigned int i = 0; i < I; i++) {
step_OpenMP();
}
end = omp_get_wtime();
seconds = end - start;
printf("Execution time %d seconds %d milliseconds for %d iterations\n", (int)seconds, (int)((seconds - (int)seconds) * 1000), I);
			break;
		}
		case CUDA: {
			hipEvent_t cuda_start, cuda_stop; // CUDA Event timers
			float milliseconds = 0; // Timing results variable (must be `float` type for call to `hipEventElapsedTime`)
			hipEventCreate(&cuda_start); hipEventCreate(&cuda_stop); // Create CUDA Events
hipEventRecord(cuda_start); // Record the start time before calling the kernel launching simulation function
for (unsigned int i = 0; i < I; i++) {
step_CUDA();
}
hipEventRecord(cuda_stop); // Record the stop time once the simulation has finished
hipEventSynchronize(cuda_stop); // Ensure stop time has finished recording before continuing
checkCUDAError("Error running simulation kernel\n");
hipEventElapsedTime(&milliseconds, cuda_start, cuda_stop); // Write the elapsed time to `milliseconds`
printf("Execution time %d seconds %d milliseconds for %d iterations\n", (int)milliseconds / 1000, (int)milliseconds % 1000, I);
hipEventDestroy(cuda_start); hipEventDestroy(cuda_stop); // Cleanup CUDA Event timers
			/* Copy the device output values back to the host memory `h_nbodies` then write to file for validation.
			hipMemcpy(h_nbodies->x, h_d_nbodies.x, data_column_size, hipMemcpyDeviceToHost);
			hipMemcpy(h_nbodies->y, h_d_nbodies.y, data_column_size, hipMemcpyDeviceToHost);
			hipMemcpy(h_nbodies->vx, h_d_nbodies.vx, data_column_size, hipMemcpyDeviceToHost);
			hipMemcpy(h_nbodies->vy, h_d_nbodies.vy, data_column_size, hipMemcpyDeviceToHost);
			checkCUDAError("Copying final Nbody system state from device to host");
*/
			break;
		}
}
}
// Cleanup
	if (M == CUDA) {
		hipFree(h_d_nbodies.x);
		hipFree(h_d_nbodies.y);
		hipFree(h_d_nbodies.vx);
		hipFree(h_d_nbodies.vy);
		hipFree(h_d_nbodies.m);
		hipFree(d_nbodies);
		hipFree(out_x);
		hipFree(out_y);
		hipFree(activity_map);
		checkCUDAError("Freeing memory from device with hipFree");
	}
else { // Whether `out_x`, `out_y`, and `activity_map` are pointers on the host or device depends on operation mode
free(out_x);
free(out_y);
free(activity_map);
}
free(h_nbodies->x);
free(h_nbodies->y);
free(h_nbodies->vx);
free(h_nbodies->vy);
free(h_nbodies->m);
free(h_nbodies);
return 0;
}
/* Functions to perform the main simulation of the Nbody system by updating the state by one time step */
// Serial CPU version
void step_serial(void) {
/* The index `i` is used to iterate over the `N` bodies in the system. For each body `i`, we choose to calculate the
`N-1` interactions of the other bodies `j != i` on `i`, as opposed to the action of `i` on all of the other bodies `j != i`.
When computed in parallel, the former avoids a synchronisation step so that the velocity of each body `i`
can be updated independently of the other threads, reducing idle time. Afterwards, we can also update
the position of body `i` and calculate which histogram bin/activity grid cell the body `i` is in, all within one loop.
This is known as loop jamming or loop fusion. See http://www.it.uom.gr/teaching/c_optimization/tutorial.html
Calculating the histogram contribution of each body is far more efficient than iterating over histogram bins/grid cells
since we exploit the fact that each body can only be in at most one grid cell at a time (D*D times fewer calculations). */
unsigned int i, j; // Counter variables
float ax, ay; // Components of resultant acceleration of a particle as a result of gravitational force
float local_xi, local_yi; // Local position variables to reduce global memory accesses, especially during inner loop
float local_vxi, local_vyi; // Local velocity variables to exchange two global memory reads for one plus two local reads
float x_ji, y_ji; // Components of displacement vector from particle `i` to particle `j`
float dist_ij; // To hold softened distance `sqrt(|r_{ji}|^{2} + eps^{2})` from `i` to `j`
// Reset histogram values to zero with `memset`. See http://www.cplusplus.com/reference/cstring/memset/
	memset(activity_map, 0, sizeof(float) * D * D); // `activity_map` is a pointer, so the full byte count of the D*D grid must be given explicitly
for (i = 0; i < N; i++) { // Iterating over bodies in the Nbody system
ax = 0; // Reset resultant acceleration in `x` direction to zero for new particle
ay = 0; // Reset resultant acceleration in `y` direction to zero for new particle
// Read position data from global memory to the stack
local_xi = h_nbodies->x[i];
local_yi = h_nbodies->y[i];
for (j = 0; j < N; j++) {
if (j == i) { // Skip the calculation when i = j (saves calculation time; could consider branching effects on GPU)
continue;
}
// Calculate displacement from particle `i` to particle `j`, since common expression in force equation
// Using local variables for `x[i]`, `y[i]` here removes a global memory read from each inner loop iteration
x_ji = h_nbodies->x[j] - local_xi;
y_ji = h_nbodies->y[j] - local_yi;
// Calculate distance from `i` to `j` with softening factor since used in denominator of force expression
// Explicit casting required since `sqrt` function expects `double` type input and output; operation execution order
dist_ij = (float)sqrt((double)x_ji * x_ji + (double)y_ji * y_ji + eps_sq);
/* Add unscaled contribution to acceleration due to gravitational force of `j` on `i`
Universal Gravitation: `F_ij = G * m_i * m_j * r_ji / |r_ji|^3` ; Newton's 2nd Law: F_i = m_i * a_i
See top of file for further explanation of calculation, physical background */
ax += h_nbodies->m[j] * x_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
ay += h_nbodies->m[j] * y_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
/* It would be possible to add force/acceleration contributions to `h_nbodies->v` directly within this inner loop.
However this would cause this function to be bound by memory access latency (repeated writes to `h_nbodies->v`).
Therefore we use the temporary/local variables `ax` and `ay` instead */
}
/* Use current velocity, acceleration to calculate position, velocity at next time step, respectively. */
/* Former code uses extra heap memory buffers for velocity, adding extra steps pointer swapping and using more memory
However this implementation scores highly for readability, as it makes the intended outcome clear (no race conditions)
out_x[i] = h_nbodies->x[i] + h_nbodies->vx[i] * dt;
out_y[i] = h_nbodies->y[i] + h_nbodies->vy[i] * dt;
out_vx[i] = h_nbodies->vx[i] + G * ax * dt;
out_vy[i] = h_nbodies->vy[i] + G * ay * dt; */
		// Using local velocity variables also reduces global memory reads, but only marginally compared to `local_xi`, `local_yi`
local_vxi = h_nbodies->vx[i];
local_vyi = h_nbodies->vy[i];
// More care has to be taken about the order of execution to ensure the output positions are calculated correctly
// Use current velocity to calculate next position
local_xi += local_vxi * dt;
local_yi += local_vyi * dt;
// Now the local position variables hold the new positions and can be used to update the activity map
// Use current acceleration (based on current positions) to calculate the new velocity
// Scale `ax`, `ay` by gravitational constant `G`. See `NBody.h` for definition and comment.
h_nbodies->vx[i] = local_vxi + G * ax * dt; // Write the new velocity back to `h_nbodies->vx[i]`
h_nbodies->vy[i] = local_vyi + G * ay * dt; // Write the new velocity back to `h_nbodies->vy[i]`
// We can update particle velocities in-place without adversely affecting subsequent iterations/other threads
// Update the activity map - a flat array of D*D float values storing normalised particle density values in a 2D grid
// First check whether the new position of particle `i` is within the activity grid [0,1)^{2}
// if ((out_x[i] >= 0) && (out_x[i] < 1) && (out_y[i] >= 0) && (out_y[i] < 1)) {
if ((local_xi >= 0) && (local_xi < 1) && (local_yi >= 0) && (local_yi < 1)) {
// If so, calculate the index of the grid element that particle `i` is in and increment the associated histogram bin
// Multiply position vector by `D` then truncate components to `int` to find position in \{0,...,D-1\}^{2} grid
// Can result in race condition when outer `i` loop parallel as multiple threads could increment at once
// Possible solutions: Critical section; atomic operator; move section outside parallel loop (barrier/master method)
activity_map[D * (int)(D * local_yi) + (int)(D * local_xi)]++; // Linearize the index from 2D grid into 1D array
}
// Write the new position of particle `i` to the output buffers to avoid interfering with subsequent iterations
out_x[i] = local_xi;
out_y[i] = local_yi;
}
// Scale activity map values by `D / N` to normalize the histogram values and then scale by D to increase brightness
const float one_over_N = (float)1 / N; // Store the inverse of global variable `N` as a constant to cache value
for (i = 0; i < D * D; i++) {
activity_map[i] *= one_over_N * D;
}
/* Finally, update the `nbody` data pointers to reference the newly calculated arrays of position data.
We swap the input and output pointers rather than simply overwriting the input pointers because that would result
in losing the original input pointers, losing allocated heap memory addresses and causing a memory leak! */
float* temp; // Declare a temporary pointer to `float` to hold addresses whilst swapping the input and output pointers
temp = h_nbodies->x; // Keep track of the old input pointer for later use so we don't lose any allocated memory
h_nbodies->x = out_x; // Update the `h_nbodies->x` pointer which is used for visualisation, and the next `step` iteration
out_x = temp; // Reset `out_x` to a 'fresh', 'empty' piece of memory
temp = h_nbodies->y; // Keep track of the old input pointer for later use so we don't lose any allocated memory
h_nbodies->y = out_y; // Update the `h_nbodies->y` pointer which is used for visualisation, and the next `step` iteration
out_y = temp; // Reset `out_y` to a distinct piece of 'fresh' and 'empty' memory
}
/* Profiling with Visual Studio's Diagnostic Tools and PerfTips by setting breakpoints to time code segments and using
Debug->Windows->Show Diagnostic Tools https://docs.microsoft.com/en-us/visualstudio/profiling/profiling-feature-tour?view=vs-2019
Shows that as `N` increases the majority of time spent running the programme is spent calling the simulation step function,
and within that the loop over `N` particles (indexed by `i`) occupies most of the compute time rather than the loop over
the activity grid cells. This makes sense since there are far more compute steps within the `i` loop, and generally D will be
much smaller than `N` and is effectively limited in visualisation by screen resolution.
Therefore it is most important to parallelise the outer `i` loop. Further analysis suggests that as `N` increases further,
the majority of time spent within each outer loop is spent iterating over the inner `j` loop of interactions between particles,
so nested parallel loops should also be considered. Amongst all the operations/function calls within each simulation step,
it appears that the call to the `sqrt` function in the inner loop is the most expensive. */
// OpenMP version (For parallel computation on a multicore CPU)
/* Benchmarking results for parallelising outer loop over `i` (on my 4 core personal laptop)
Command Line Arguments | Histogram Race Handling | Scheduling | Execution Time(s)
"16384 16 CPU -i 10" | N/A | N/A | 61.357s, 61.988s
"16384 16 OPENMP -i 10" | Atomic | schedule(static) | 17.184s, 17.624s, 17.713s
"16384 16 OPENMP -i 10" | Atomic | schedule(static, 1) | 17.314s, 17.399s
"16384 16 OPENMP -i 10" | Atomic | schedule(static, 2) | 17.182s, 17.478s
"16384 16 OPENMP -i 10" | Atomic | schedule(static, 4) | 17.225s, 17.363s
"16384 16 OPENMP -i 10" | Atomic | schedule(static, 8) | 17.248s, 17.446s
"16384 16 OPENMP -i 10" | Atomic | schedule(guided) | 15.769s, 16.650s, 16.830s, 17.110s
"16384 16 OPENMP -i 10" | Atomic | schedule(dynamic) | 11.895s, 12.028s, 12.210s
"16384 16 OPENMP -i 10" | Atomic | schedule(dynamic, 2) | 11.841s, 11.997s, 12.266s
"16384 16 OPENMP -i 10" | Atomic | schedule(dynamic, 4) | 11.766s, 12.047s, 12.210s
"16384 16 OPENMP -i 10" | Critical | schedule(static, 4) | 17.341s, 18.007s
"16384 16 OPENMP -i 10" | Critical | schedule(guided) | 16.720s, 16.994s
"16384 16 OPENMP -i 10" | Critical | schedule(dynamic) | 11.617s, 11.888s, 11.999s, 13.054s
"8192 16 CPU -i 100" | N/A | N/A | 153.307s, 154.100s
"8192 16 OPENMP -i 100" | Atomic | schedule(dynamic) | 29.687s, 30.332s
"8192 16 OPENMP -i 100" | Critical | schedule(dynamic) | 28.490s, 30.305s
"2048 16 CPU -i 1000" | N/A | N/A | 95.132s, 95.174s, 95.677s
"2048 16 OPENMP -i 1000" | Atomic | schedule(static) | 29.240s, 29.584s
"2048 16 OPENMP -i 1000" | Atomic | schedule(static, 4) | 29.685s, 30.073s
"2048 16 OPENMP -i 1000" | Atomic | schedule(guided) | 29.357s, 29.521s
"2048 16 OPENMP -i 1000" | Atomic | schedule(dynamic) | 17.910s, 18.774s
"2048 16 OPENMP -i 1000" | Critical | schedule(dynamic) | 18.470s, 18.577s
"512 16 CPU -i 10000" | N/A | N/A | 59.003s, 59.404s
"512 16 OPENMP -i 10000" | Atomic | schedule(dynamic) | 12.734s, 13.735s
"256 16 CPU -i 100000" | N/A | N/A | 148.552s, 149.194s
"256 16 OPENMP -i 100000" | Atomic | schedule(static, 4) | 47.987s, 48.889s
"256 16 OPENMP -i 100000" | Atomic | schedule(dynamic) | 34.568s, 36.429s
"128 16 CPU -i 100000" | N/A | N/A | 37.436s, 37.520s
"128 16 OPENMP -i 100000" | Atomic | schedule(static, 4) | 12.587s, 13.110s
"128 16 OPENMP -i 100000" | Atomic | schedule(guided) | 12.448s, 12.541s
"128 16 OPENMP -i 100000" | Atomic | schedule(dynamic) | 11.717s, 12.071s
"64 16 CPU -i 1000000" | N/A | N/A | 92.477s, 92.617s
"64 16 OPENMP -i 1000000" | Atomic | schedule(static, 4) | 31.302s, 32.235s
"64 16 OPENMP -i 1000000" | Atomic | schedule(guided) | 32.292s, 33.169s
"64 16 OPENMP -i 1000000" | Atomic | schedule(dynamic) | 38.062s, 38.133s */
/* Initial Remarks on parallelising outer loop
The data shows that dynamic scheduling is faster for values of `N` greater than 100 or so, but slower than static scheduling for
smaller values of `N`, with guided scheduling always performing between static and dynamic scheduling and never being optimal.
This is because there is uneven workload amongst threads, which favours dynamic scheduling, but the overhead cost of dynamic
scheduling at runtime becomes a limiting factor for relatively small parallel loops.
A trend of increasing OpenMP performance relative to serial CPU performance as `N` increases can also be seen as the benefits
of parallelism outweigh their overhead costs.
I believe the main source of difference in workload between threads arises from whether the particle `i` lies within
the activity grid or not. If so, a slow (atomic/critical/serial) incrementation of an activity grid cell must occur, which also
involves writing to global memory at an index of the `activity_map` array that cannot be predicted at compile time, but if
the particle `i` lies outside the activity grid this step can be skipped, causing an uneven workload between different threads.
Scheduling approach seems to have a more important impact on performance than chunk size.
On the other hand, it appears that there's no major difference in performance between using a critical section or an atomic
directive to ensure the safe incrementation of the activity grid histogram, perhaps with only a slight leaning towards atomic.
For reference information on the OMP Atomic directive, see the following links:
https://www.openmp.org/spec-html/5.0/openmpsu95.html
https://www.ibm.com/support/knowledgecenter/SSGH2K_13.1.2/com.ibm.xlc131.aix.doc/compiler_ref/prag_omp_atomic.html */
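/* For illustration, the two alternatives benchmarked above take the following forms (a sketch only, where `index` stands for
   the computed activity grid cell index; the atomic form is the one actually used in `step_OpenMP` below):
       #pragma omp atomic
       activity_map[index]++;         // Atomic update of a single scalar memory location
   versus
       #pragma omp critical
       { activity_map[index]++; }     // Critical section: more general, but serialises execution of the whole block */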
/* Benchmarking results for parallelising inner loop over `j` only (on my 4 core personal laptop)
Command Line Arguments | Acceleration Sum Handling | Scheduling | Execution Time(s)
"8192 16 OPENMP -i 10" | Two Atomic Directives | schedule(static) | 54.309s
"8192 16 OPENMP -i 10" | Two Atomic Directives | schedule(dynamic) | 49.997s
"8192 16 OPENMP -i 10" | Critical Section | schedule(static) | 54.520s
"8192 16 OPENMP -i 10" | Critical Section | schedule(dynamic) | 61.185s
"256 16 OPENMP -i 10000" | Two Atomic Directives | schedule(static) | 57.011s
"256 16 OPENMP -i 10000" | Two Atomic Directives | schedule(static, 4) | 64.039s
"256 16 OPENMP -i 10000" | Two Atomic Directives | schedule(dynamic) | 90.021s
"256 16 OPENMP -i 10000" | Critical Section | schedule(static) | 58.980s
"256 16 OPENMP -i 10000" | Critical Section | schedule(dynamic) | 79.218s */
/* Remarks on parallelising inner loop
The data shows that only parallelising the inner `j` loop over force interactions results in a 3-4x slowdown
compared to the serial CPU version. This is because of repeated overheads setting up small parallel loops within a larger loop.
The story might be different on a machine with more cores (e.g. 16 cores rather than 4 cores), but when compared to the
3-4x speedup over serial implementation from parallelising the outer `i` loop over bodies in the system, it is clear which is
preferred. As a general rule, outer loops should be parallelised first (assuming they run for a reasonable number of iterations).
Final remarks and conclusion
Finally, through testing an implementation of nested parallel loops we find the following performance hierarchy for the given
problem: Parallel outer loop > Serial CPU version > Parallel histogram scaling > Nested parallel loops > Parallel inner loop.
For **nested** parallel loops with dynamic scheduling and atomic directives to avoid race conditions when
1) Incrementing activity map contributions; and 2) Summing acceleration contributions with the inner `j` loop parallel;
given command line arguments "8192 16 OPENMP -i 100", an execution time of 268.809s was recorded, about 75% slower than serial.
In conclusion, we choose to parallelise the force calculation outer loop over `i` iterating over bodies in the system as it
is the best and only loop parallelisation which improves on the serial CPU version (by a respectable 3-6x speedup),
we choose dynamic scheduling since it outperforms static and guided scheduling for values of `N` greater than around 100, where
many feasible values of `N` lie (a separate parallel directive to choose static scheduling when N < 100 could be considered).
Finally, to avoid race conditions when each thread uses the position of its local particle to update the shared `activity_map`
histogram, we choose to use an atomic directive, though this only appears to be negligibly better than a critical section. */
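/* For reference, a sketch of the reduction-based form of the (rejected) parallel inner loop mentioned above, which would avoid
   the atomic/critical handling of the partial acceleration sums (the `reduction` clause is available in OpenMP 2.0):
       #pragma omp parallel for default(none) private(j, x_ji, y_ji, dist_ij) shared(i, local_xi, local_yi, h_nbodies, N) reduction(+: ax, ay) schedule(static)
   Each thread then accumulates private copies of `ax` and `ay`, which are summed automatically once the loop completes. */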
void step_OpenMP(void) {
/* The index `i` is used to iterate over the `N` bodies in the system. For each body `i`, we choose to calculate the
`N-1` interactions of the other bodies `j != i` on `i`, as opposed to the action of `i` on all of the other bodies `j != i`.
This is because the latter requires an extra synchronisation step before the velocity of each body `i` can be calculated,
increasing thread idle time. Subsequently, we can also update the position of body `i` and calculate its activity grid
position within the same parallel loop, reducing overhead. This is known as loop jamming or loop fusion.
See http://www.it.uom.gr/teaching/c_optimization/tutorial.html
Calculating the histogram contribution of each body is far more efficient than iterating over histogram bins/grid cells
since we exploit the fact that each body can only be in at most one grid cell at a time (D*D times fewer calculations). */
int i, j; // Counter variables. OpenMP requires these to be `int` type rather than unsigned
float ax, ay; // Components of resultant acceleration of a particle as a result of gravitational force
float local_xi, local_yi; // Local position variables to reduce global memory accesses, especially during inner loop
float local_vxi, local_vyi; // Local velocity variables to exchange two global memory reads for one plus two local reads
float x_ji, y_ji; // Components of displacement vector from particle `i` to particle `j`
float dist_ij; // To hold softened distance `sqrt(|r_{ji}|^{2} + eps^{2})` from `i` to `j`
// Reset histogram values to zero with `memset`. See http://www.cplusplus.com/reference/cstring/memset/
	memset(activity_map, 0, sizeof(float) * D * D); // `activity_map` is a pointer, so the full byte count of the D*D grid must be given explicitly
//omp_set_nested(1);
	#pragma omp parallel for default(none) private(i, j, ax, ay, local_xi, local_yi, x_ji, y_ji, dist_ij, local_vxi, local_vyi) shared(N, h_nbodies, activity_map, D, out_x, out_y) schedule(dynamic)
for (i = 0; i < N; i++) { // Iterating over bodies in the Nbody system
ax = 0; // Reset resultant acceleration in `x` direction to zero for new particle
ay = 0; // Reset resultant acceleration in `y` direction to zero for new particle
// Read position data from global memory to the stack
local_xi = h_nbodies->x[i];
local_yi = h_nbodies->y[i];
// Can treat `i` as a shared variable on the inner `j` loop since we read without changing within each outer loop iteration
// Otherwise could use `firstprivate(i)` declaration to pass in the value to each thread
//#pragma omp parallel for default(none) private(j, x_ji, y_ji, dist_ij) shared(i, ax, ay, local_xi, local_yi, h_nbodies) schedule(dynamic)
for (j = 0; j < N; j++) {
if (j == i) { // Skip the calculation when i = j (saves calculation time; could consider branching effects on GPU)
continue;
}
// Calculate displacement from particle `i` to particle `j`, since common expression in force equation
// Using local variables for `x[i]`, `y[i]` here removes a global memory read from each inner loop iteration
x_ji = h_nbodies->x[j] - local_xi;
y_ji = h_nbodies->y[j] - local_yi;
// Calculate distance from `i` to `j` with softening factor since used in denominator of force expression
// Explicit casting required since `sqrt` function expects `double` type input and output; operation execution order
dist_ij = (float)sqrt((double)x_ji * x_ji + (double)y_ji * y_ji + eps_sq);
/* Add unscaled contribution to acceleration due to gravitational force of `j` on `i`
Universal Gravitation: `F_ij = G * m_i * m_j * r_ji / |r_ji|^3` ; Newton's 2nd Law: F_i = m_i * a_i
See top of file for further explanation of calculation, physical background */
			// If the inner `j` loop is parallel, adding to `ax` and `ay` will result in a race condition.
// Could try a reduction directive for `ax`, `ay` in the parallel inner loop directive if supported by OpenMP 2.0
//#pragma omp critical {
ax += h_nbodies->m[j] * x_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
ay += h_nbodies->m[j] * y_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
//}
/* It would be possible to add force/acceleration contributions to `h_nbodies->v` directly within this inner loop.
However this would cause this function to be bound by memory access latency (repeated writes to `h_nbodies->v`).
Therefore we use the temporary/local variables `ax` and `ay` instead */
}
/* Use current velocity, acceleration to calculate position, velocity at next time step, respectively. */
/* Former code uses extra heap memory buffers for velocity, adding extra steps pointer swapping and using more memory
However this implementation scores highly for readability, as it makes the intended outcome clear (no race conditions)
out_x[i] = h_nbodies->x[i] + h_nbodies->vx[i] * dt;
out_y[i] = h_nbodies->y[i] + h_nbodies->vy[i] * dt;
out_vx[i] = h_nbodies->vx[i] + G * ax * dt;
out_vy[i] = h_nbodies->vy[i] + G * ay * dt; */
		// Using local velocity variables also reduces global memory reads, but only marginally compared to `local_xi`, `local_yi`
local_vxi = h_nbodies->vx[i];
local_vyi = h_nbodies->vy[i];
// More care has to be taken about the order of execution to ensure the output positions are calculated correctly
// Use current velocity to calculate next position
local_xi += local_vxi * dt;
local_yi += local_vyi * dt;
// Now the local position variables hold the new positions and can be used to update the activity map
// Use current acceleration (based on current positions) to calculate the new velocity
// Scale `ax`, `ay` by gravitational constant `G`. See `NBody.h` for definition and comment.
h_nbodies->vx[i] = local_vxi + G * ax * dt; // Write the new velocity back to `h_nbodies->vx[i]`
h_nbodies->vy[i] = local_vyi + G * ay * dt; // Write the new velocity back to `h_nbodies->vy[i]`
// We can update particle velocities in-place without adversely affecting subsequent iterations/other threads
// Update the activity map - a flat array of D*D float values storing normalised particle density values in a 2D grid
// First check whether the new position of particle `i` is within the activity grid [0,1)^{2}
// if ((out_x[i] >= 0) && (out_x[i] < 1) && (out_y[i] >= 0) && (out_y[i] < 1)) {
if ((local_xi >= 0) && (local_xi < 1) && (local_yi >= 0) && (local_yi < 1)) {
// If so, calculate the index of the grid element that particle `i` is in and increment the associated histogram bin
// Multiply position vector by `D` then truncate components to `int` to find position in \{0,...,D-1\}^{2} grid
// Can result in race condition when outer `i` loop parallel as multiple threads could increment at once
// Possible solutions: Critical section; atomic operator; move section outside parallel loop (barrier/master method)
/* Atomic operations can be used to safely increment a shared numeric value; critical regions have other uses too */
#pragma omp atomic
activity_map[D * (int)(D * local_yi) + (int)(D * local_xi)]++; // Linearize the index from 2D grid into 1D array
}
// Write the new position of particle `i` to the output buffers to avoid interfering with other threads/iterations
out_x[i] = local_xi;
out_y[i] = local_yi;
}
// Scale activity map values by `D / N` to normalize the histogram values and then scale by D to increase brightness
const float one_over_N = (float)1 / N; // Store the inverse of global variable `N` as a constant to cache value
/* Parallelising this histogram scaling loop actually has a negative impact on performance due to fork/join overheads
outweighing the small gains from parallelising a non-compute intensive loop. Using command line arguments (release mode)
"2048 1024 OPENMP -i 100" we reliably time 9.8 seconds for serial execution vs 11.5s-11.9s with this loop parallel
and using static or guided scheduling (chunk size has little effect) and 14.5s-14.9s for dynamic scheduling.
The reason dynamic scheduling is even slower than static scheduling is the extra runtime overheads of dynamic scheduling
where the workloads are extremely uniform (two multiplications per loop) */
//#pragma omp parallel for default(none) private(i) shared(activity_map, one_over_N, D) schedule(dynamic)
for (i = 0; i < D * D; i++) {
activity_map[i] *= one_over_N * D;
}
/* Finally, update the `nbody` data pointers to reference the newly calculated arrays of position data.
We swap the input and output pointers rather than simply overwriting the input pointers because that would result
in losing the original input pointers, losing allocated heap memory addresses and causing a memory leak! */
float* temp; // Declare a temporary pointer to `float` to hold addresses whilst swapping the input and output pointers
temp = h_nbodies->x; // Keep track of the old input pointer for later use so we don't lose any allocated memory
h_nbodies->x = out_x; // Update the `h_nbodies->x` pointer which is used for visualisation, and the next `step` iteration
out_x = temp; // Reset `out_x` to a 'fresh', 'empty' piece of memory
temp = h_nbodies->y; // Keep track of the old input pointer for later use so we don't lose any allocated memory
h_nbodies->y = out_y; // Update the `h_nbodies->y` pointer which is used for visualisation, and the next `step` iteration
out_y = temp; // Reset `out_y` to a distinct piece of 'fresh' and 'empty' memory
}
// CUDA version (for parallel computation on GPU)
void step_CUDA(void) {
/* This host function sets up kernel launch parameters and launches GPU kernel(s) to calculate one simulation step */
// First reset histogram values to zero with `hipMemset`.
// See https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html for documentation
	hipMemset(activity_map, 0, sizeof(float) * D * D); // `activity_map` is a device pointer here, so the full byte count of the D*D grid must be given explicitly
// Prepare kernel launch parameters
// Ensure we have the minimum number of blocks needed for total threads to exceed `N`
unsigned int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
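	// e.g. (illustrative) N = 1000 with 256 threads per block gives blocks = (1000 + 255) / 256 = 4, i.e. 1024 threads, enough to cover every body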
// Run the kernel
	simulation_kernel<<<blocks, THREADS_PER_BLOCK>>>(d_nbodies, out_x, out_y, activity_map, N, D);
//checkCUDAError("Error running simulation kernel\n");
	/* New velocities and activity map data have been calculated in-place by the call to `simulation_kernel`, whilst new
	position data has been written to the buffers `out_x`, `out_y`. We must update the device position pointers accordingly.
	The swap is performed on the host-side copy `h_d_nbodies`, which is then copied over `d_nbodies` so that the next kernel
	launch (and the visualiser) reads the newly calculated positions. */
	swap_float_pointers(&h_d_nbodies.x, &out_x);
	swap_float_pointers(&h_d_nbodies.y, &out_y);
	hipMemcpy(d_nbodies, &h_d_nbodies, sizeof(nbody_soa), hipMemcpyHostToDevice);
}
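/* Note: in HIP the triple-chevron launch used in `step_CUDA` can equivalently be written with the `hipLaunchKernelGGL` macro,
   e.g. (a sketch assuming the default stream and no dynamic shared memory):
   hipLaunchKernelGGL(simulation_kernel, dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, d_nbodies, out_x, out_y, activity_map, N, D); */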
/* Functions for parsing Command Line Arguments
The expected arguments are: "nbody.exe N D M [-i I] [-f input_file]" */
void print_help() {
printf("USAGE: \"nbody.exe N D M [-i I] [-f input_file]\", where\n");
printf(" N is the number of bodies to simulate.\n");
printf(" D is the integer dimension of the activity grid. The Grid has D*D locations.\n");
printf(" M is the operation mode, either `CPU` or `OPENMP`\n");
printf(" [-i I] [OPTIONAL] Specifies number `I` of simulation iterations to perform. Visualisation mode is used when `-i` flag not set.\n");
printf("[-f input_file] [OPTIONAL] Specifies an input file with an initial `N` bodies of data. A random initial state will be generated when `-f` flag not set.\n");
}
void parseNDM(const char* argv[3]) {
N = parse_str_as_uint(argv[0]);
checkLastError("Error parsing argument for `N` to `int`");
if (N == 0) {
fprintf(stderr, "Error: Argument \"%s\" for number of bodies `N` parsed as 0.\n", argv[0]);
print_help();
exit(EXIT_FAILURE);
}
D = parse_str_as_uint(argv[1]);
checkLastError("Error parsing argument for `D` to `int`");
if (strcmp(argv[2], "CPU") == 0) {
M = CPU;
}
else if (strcmp(argv[2], "OPENMP") == 0) {
M = OPENMP;
}
else if (strcmp(argv[2], "CUDA") == 0) {
M = CUDA;
}
else {
fprintf(stderr, "Error: Unexpected value %s for operation mode `M` (case sensitive).\n", argv[3]);
print_help();
exit(EXIT_FAILURE);
}
}
void parse_one_option(const char* options[2]) {
if (strcmp(options[0], "-i") == 0) {
I = parse_str_as_uint(options[1]);
checkLastError("Error parsing argument for `I` to `int`");
}
else if (strcmp(options[0], "-f") == 0) {
f_flag = 5;
}
else { // Invalid option flag
fprintf(stderr, "Error: Unexpected optional arguments/flags received.\n");
print_help();
exit(EXIT_FAILURE);
}
}
void parse_two_options(const char* options[4]) {
if ((strcmp(options[0], "-i") == 0) && (strcmp(options[2], "-f") == 0)) {
I = parse_str_as_uint(options[1]);
checkLastError("Error parsing argument for `I` to `int`");
f_flag = 7;
}
else if ((strcmp(options[0], "-f") == 0) && (strcmp(options[2], "-i") == 0)) {
I = parse_str_as_uint(options[3]);
checkLastError("Error parsing argument for `I` to `int`");
f_flag = 5;
}
else { // Invalid option flag combination
fprintf(stderr, "Error: Unexpected combination of optional arguments/flags received.\n");
print_help();
exit(EXIT_FAILURE);
}
}
unsigned int parse_str_as_uint(const char * str) {
if (isdigit(str[0]) == 0) { // In particular, this excludes leading minus sign/negative input values.
fprintf(stderr, "Error parsing %s as `int`: First char not decimal digit (negative values not permitted).\n", str);
print_help();
exit(EXIT_FAILURE);
}
unsigned int val; // To hold parsed `unsigned int` value
char* pEnd; // Pointer to first character after number in `str`
val = (unsigned int)strtol(str, &pEnd, 10); // Convert string to long integer in base 10. Set `pEnd`.
if (pEnd[0] != '\0') { // Check for extra characters in `str` after initial number (can include decimal point)
fprintf(stderr, "Error: Unexpected characters in string %s when parsing to `int`.\n", str);
print_help();
exit(EXIT_FAILURE);
}
return val;
}
/* Functions for reading input files */
void read_nbody_file(const char* filename, const int N) {
FILE* f; // Input file handle
char line_buffer[BUFFER_SIZE]; // Buffer to hold lines read from file
char* ptr_ch = NULL; // Pointer to track character position when reading `line_buffer` string
int line_number = 0; // Keep track of line number for error messaging
int body_count = 0; // Count of number of body data lines read to ensure it matches `N`
f = fopen(filename, "r"); // Open the file in read-only mode
if (f == NULL) {
fprintf(stderr, "Error opening file '%s' for reading\n", filename);
exit(EXIT_FAILURE);
}
/* Read file line by line with `fgets` function. See http://www.cplusplus.com/reference/cstdio/fgets/ for reference
Reads from file into buffer until (soonest of) either `\n` or `EOF` is read, or `BUFFER_SIZE-1` characters read */
while (fgets(line_buffer, BUFFER_SIZE, f) != NULL) {
line_number++; // Increment count of lines read
if (line_buffer[0] == '#') { // If first char in line is `#` skip to next line to ignore comments
continue;
}
if (line_buffer[strlen(line_buffer) - 1] != '\n') { // If last char read from file is not '\n', the line is too long
// This checks that a full line of data was written from file to buffer when not a comment line
fprintf(stderr, "Error reading line %u: Line length exceeds buffer size of %d characters\n", line_number, BUFFER_SIZE);
exit(EXIT_FAILURE);
}
/* Read the line of data into `h_nbodies`, using comma character `,` as delimiter to separate data values
This could be considered as an unrolled while loop over commas counted using `strchr` calls with nontrivial control flow
The use of `ptr_ch` as a separate variable from `line_buffer` could probably be removed. */
ptr_ch = line_buffer; // Place `ptr_ch` at the start of the line to be read
/* Use `strchr` to search through the line starting at position `ptr_ch` to find the next comma `,` character
Returns `NULL` pointer if no comma `,` character found in line after position `ptr_ch`
See http://www.cplusplus.com/reference/cstring/strchr/ for reference */
if ((strchr(ptr_ch, ',') == NULL)) { // Check for comma after first data value
fprintf(stderr, "Error reading line %u: No data delimiters (`,`) detected\n", line_number);
exit(EXIT_FAILURE);
}
else { // This appears to be a valid data line. Don't write past memory bounds for `h_nbodies`!
if (body_count > N-1) { // Throw an error if we have already read `N` or more data rows
fprintf(stderr, "Error reading line %u: Num bodies in file exceeds input N (%d)\n", line_number, N);
exit(EXIT_FAILURE);
}
/* Read `float x` value or randomly generate if data missing */
// Move `ptr_ch` past any whitespace, then check if the string starts with `[+-]?[0-9]+`
while (isspace(ptr_ch[0])) {
ptr_ch++;
}
// If string matches `[+-]?[0-9]+.*` after preceding whitespace, parse with `strtod`
if (isdigit(ptr_ch[0]) || (((ptr_ch[0] == '+') || (ptr_ch[0] == '-')) && isdigit(ptr_ch[1]))) {
// Parse and store `x` value, then update `ptr_ch` to point to end of number
h_nbodies->x[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing `x` data to `float`");
// Check there are no further digits before the comma at `strchr(ptr_ch, ',')`
if ((strpbrk(ptr_ch, "0123456789") < strchr(ptr_ch, ',')) && (strpbrk(ptr_ch, "0123456789") != NULL)) {
fprintf(stderr, "Error reading line %u: Unexpected format when parsing `x` data to float\n", line_number);
exit(EXIT_FAILURE);
}
}
else { // Decide data missing or corrupted - means we ignore strings like ".5" and "-.2"
h_nbodies->x[body_count] = (float)rand() / RAND_MAX; // Random position in [0,1]
}
ptr_ch = strchr(ptr_ch, ',') + 1; // Update `ptr_ch` to start after the 1st comma
}
if ((strchr(ptr_ch, ',') == NULL)) { // Check for comma after second data value
fprintf(stderr, "Error reading line %u: Only 1 data delimiter (`,`) detected\n", line_number);
exit(EXIT_FAILURE);
}
else { /* Read `float y` value or randomly generate if missing */
// Move `ptr_ch` past any whitespace, then check if the string starts with `[+-]?[0-9]+`
while (isspace(ptr_ch[0])) {
ptr_ch++;
}
// If string matches `[+-]?[0-9]+.*` after preceding whitespace, parse with `strtod`
if (isdigit(ptr_ch[0]) || (((ptr_ch[0] == '+') || (ptr_ch[0] == '-')) && isdigit(ptr_ch[1]))) {
// Parse and store `y` value, then update `ptr_ch` to point to end of number
h_nbodies->y[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing `y` data to `float`");
// Check there are no further digits before the comma at `strchr(ptr_ch, ',')`
if ((strpbrk(ptr_ch, "0123456789") < strchr(ptr_ch, ',')) && (strpbrk(ptr_ch, "0123456789") != NULL)) {
fprintf(stderr, "Error reading line %u: Unexpected format when parsing `y` data to float\n", line_number);
exit(EXIT_FAILURE);
}
}
else { // Decide data missing or corrupted - means we ignore strings like ".5" and "-.2"
h_nbodies->y[body_count] = (float)rand() / RAND_MAX; // Random position in [0,1]
}
ptr_ch = strchr(ptr_ch, ',') + 1; // Update `ptr_ch` to start after 2nd comma
}
if ((strchr(ptr_ch, ',') == NULL)) { // Check for comma after third data value
fprintf(stderr, "Error reading line %u: Only 2 data delimiters (`,`) detected\n", line_number);
exit(EXIT_FAILURE);
}
else { /* Read `float vx` value or set to zero if missing */
// Move `ptr_ch` past any whitespace, then check if the string starts with `[+-]?[0-9]+`
while (isspace(ptr_ch[0])) {
ptr_ch++;
}
// If string matches `[+-]?[0-9]+.*` after preceding whitespace, parse with `strtod`
if (isdigit(ptr_ch[0]) || (((ptr_ch[0] == '+') || (ptr_ch[0] == '-')) && isdigit(ptr_ch[1]))) {
// Parse and store `vx` value, then update `ptr_ch` to point to end of number
h_nbodies->vx[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing `vx` data to `float`");
// Check there are no further digits before the comma at `strchr(ptr_ch, ',')`
if ((strpbrk(ptr_ch, "0123456789") < strchr(ptr_ch, ',')) && (strpbrk(ptr_ch, "0123456789") != NULL)) {
fprintf(stderr, "Error reading line %u: Unexpected format when parsing `vx` data to float\n", line_number);
exit(EXIT_FAILURE);
}
} // Otherwise decide data is missing or corrupted - means strings like ".5" and "-.2" are ignored
// In this case we don't change `vx` since velocity array filled with zeroes by default
ptr_ch = strchr(ptr_ch, ',') + 1; // Update `ptr_ch` to start after 3rd comma
}
if ((strchr(ptr_ch, ',') == NULL)) { // Check for comma after fourth data value
fprintf(stderr, "Error reading line %u: Only 3 data delimiters (`,`) detected\n", line_number);
exit(EXIT_FAILURE);
}
else { /* Read `float vy` value or set to zero if missing */
// Move `ptr_ch` past any whitespace, then check if the string starts with `[+-]?[0-9]+`
while (isspace(ptr_ch[0])) {
ptr_ch++;
}
// If string matches `[+-]?[0-9]+.*` after preceding whitespace, parse with `strtod`
if (isdigit(ptr_ch[0]) || (((ptr_ch[0] == '+') || (ptr_ch[0] == '-')) && isdigit(ptr_ch[1]))) {
				// Parse and store `vy` value, then update `ptr_ch` to point to end of number
h_nbodies->vy[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing `vy` data to `float`");
// Check there are no further digits before the comma at `strchr(ptr_ch, ',')`
if ((strpbrk(ptr_ch, "0123456789") < strchr(ptr_ch, ',')) && (strpbrk(ptr_ch, "0123456789") != NULL)) {
fprintf(stderr, "Error reading line %u: Unexpected format when parsing `vy` data to float\n", line_number);
exit(EXIT_FAILURE);
}
} // Otherwise decide data is missing or corrupted - means strings like ".5" and "-.2" are ignored
// In this case we don't change `vy` since velocity array filled with zeroes by default
ptr_ch = strchr(ptr_ch, ',') + 1; // Update `ptr_ch` to start after 4th comma
}
if ((strchr(ptr_ch, ',') != NULL)) { // Ensure no more commas after fifth data value
fprintf(stderr, "Error reading line %u: Too many data columns detected (5 expected)\n", line_number);
exit(EXIT_FAILURE);
}
else { // Else read from after the 4th/last comma (`ptr_ch`) to the end of the line
/* Read `float m` value or set to 1/N if data missing, corrupted, or zero (no massless bodies) */
if (strtod(ptr_ch, NULL) == 0) { // If zero returned, then input data was either missing, corrupted, or zero
fprintf(stderr, "Error reading line %u: Mass data missing, corrupted, or set to zero. Replacing with default value (1/N) to avoid massless bodies\n", line_number);
// Set mass to 1/N to avoid creating massless objects (and divide-by-zero problems later)
h_nbodies->m[body_count] = (float)1 / N; // Mass distributed equally among N bodies
}
else { // Otherwise non-zero `float` value for mass read successfully, so write to `m`
// Parse and store `m` value, then update `ptr_ch` to point to end of number
h_nbodies->m[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing mass data to `float`");
if (strpbrk(ptr_ch, "0123456789") != NULL) { // Check there are no further digits before the end of the line
fprintf(stderr, "Error reading line %u: Unexpected format when parsing mass data\n", line_number);
exit(EXIT_FAILURE);
}
}
} // One line of nbody data has been read successfully. Increment the body count.
body_count++;
// Read new line if not end of file. Thus data file can be terminated with single empty line.
}
if (body_count != N) { // Check fails when fewer than N bodies in file
fprintf(stderr, "Error: Num bodies in file (%u) does not match input N (%d)\n", body_count, N);
exit(EXIT_FAILURE);
}
fclose(f);
}
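/* For reference, each non-comment line of the input file is expected to hold five comma separated values `x,y,vx,vy,m`,
   one body per line, with `#` starting a comment line. Missing position values are randomised, missing velocities default
   to zero and a missing/zero mass defaults to 1/N, so a line such as the following (values illustrative only) is valid:
   0.4981,0.2467,0.0,0.0,0.0039 */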
void checkLastError(const char* msg) {
if (errno != 0) {
perror(msg);
print_help();
exit(EXIT_FAILURE);
}
}
void checkCUDAError(const char* msg) {
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
| 1fe6a9791bc9b7437cf110dbfd9cb441344e9f38.cu | /* Assignment 2 Program
Building upon the code written for assignment 1, this program implements GPU code for N-body simulation and visualization.
In total, we implement a serial CPU version, an OpenMP version for multicore processors, and a CUDA version for Nvidia GPUs.
Timing code is also included for benchmarking performance.
The accompanying report provides discussion on design considerations regarding performance optimization and validation. */
/* Problem Description
We consider a system of N bodies in frictionless 2D space exerting gravitational force on each other.
See https://en.wikipedia.org/wiki/N-body_problem for further background on the physics of the N-body problem.
We simulate the progression of such an N-body system through time using numerical integration by evaluating all pairwise
gravitational interactions between bodies in the system. The force `F_{ij}` of gravity on a body `i` exerted by a body `j`
can be calculated through the following formula: `F_{ij} = G*m_{i}*m_{j}*r_{ji}/|r_{ji}|^{3}` where `G` is the gravitational
constant, `m` denotes the mass of a body, and `r_{ji}` denotes the displacement vector from `i` towards `j`.
This is known as [Newton's Law of Universal Gravitation](https://en.wikipedia.org/wiki/Newton%27s_law_of_universal_gravitation).
We add a softening factor `eps` to the denominator to avoid the force between two approaching bodies growing without bound.
This replaces `|r_{ji}|` with `sqrt(|r_{ji}|^{2} + eps^{2})` in the expression in the denominator.
At each time `t_{k}` we calculate the resultant (sum total) force `F_{i;k}` on each body and use this to calculate
acceleration `a_{i;k}`, then use the [Forward Euler method](https://en.wikipedia.org/wiki/Euler_method) to update the
velocity and position at time `t_{k+1} = t_{k} + dt` based on `a_{i;k}`, `v_{i;k}`, respectively, where `dt` is the time step. */
/* C Language Library headers */
#include <stdio.h> // http://www.cplusplus.com/reference/cstdio/
#include <stdlib.h> // http://www.cplusplus.com/reference/cstdlib/
#include <string.h> // http://www.cplusplus.com/reference/cstring/
#include <ctype.h> // http://www.cplusplus.com/reference/cctype/
#include <time.h> // http://www.cplusplus.com/reference/ctime/
#include <math.h> // http://www.cplusplus.com/reference/cmath/
#include <errno.h> // http://www.cplusplus.com/reference/cerrno/ (required for `errno`, which is checked in `checkLastError`)
/* To enable OpenMP support in your project you will need to include the OpenMP header file `omp.h`
and enable the compiler to use the OpenMP runtime.
Set 'OpenMP Support' to 'Yes' (for both Debug and Release builds) in Project->Properties->C/C++->Language
Add `_CRT_SECURE_NO_WARNINGS` to 'Preprocessor Definitions' in Project->Properties->C/C++->Preprocessor */
#include <omp.h>
#include <cuda_runtime.h>
/* Local header files */
#include "NBody.h"
#include "NBodyVisualiser.h"
/* Preprocessor definitions/macros */
#define USER_NAME "smp16emp" // Replace with your username
#define BUFFER_SIZE 128 // Maximum line length accepted from input file (reasonable as only 5 (comma separated) floating point numbers expected)
#define THREADS_PER_BLOCK 256
/* Function declarations/prototypes */
void print_help();
void parseNDM(const char* argv[3]);
void parse_one_option(const char* argv[2]);
void parse_two_options(const char* argv[4]);
unsigned int parse_str_as_uint(const char* str);
void read_nbody_file(const char* filename, const int N);
void checkLastError(const char* msg);
void checkCUDAError(const char* msg);
void step_serial(void);
void step_OpenMP(void);
void step_CUDA(void);
void swap_float_pointers(float** p1, float** p2);
/* Global variables (shared by/used in multiple functions) */
/* Command line inputs */
unsigned int N; // Number of bodies in the system
unsigned int D; // Dimension of the activity grid
MODE M; // Operation mode. Allows CPU = 0, OPENMP = 1, CUDA = 2
unsigned int I = 0; // Number of iterations of the simulation to calculate when the `-i` flag is set, else 0
unsigned int f_flag = 0; // Input file flag. 0 if not specified, else such that `input_filename = argv[f_flag]` in `main`.
/* Data buffers */
nbody_soa* h_nbodies; // Pointer to a structure of arrays (preferred over an array of structures for coalesced memory access)
/* Separate output buffers for updated particle positions are required to avoid interference between loop iterations/threads
when calculating forces based on current particle positions. Buffers for output velocity components are not required
because a given particle's velocity is only used to calculate its own new position and nothing else. However this requires
each particle's new position be calculated first before its velocity is updated in-place.
Pointer swapping can be used to reduce memory copying between multiple buffers when updating system state.
See https://en.wikipedia.org/wiki/Multiple_buffering for more on double/multiple buffering
The visualiser only (re)reads position data once after each time the simulation `step` function completes,
rather than throughout the whole `step` calculation process, so the particles update positions in sync anyway */
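/* NBody.h is not reproduced in this file. Judging purely from the accesses below, `nbody_soa` is presumably a plain
struct of five float pointers, roughly: struct nbody_soa { float *x, *y, *vx, *vy, *m; };
This is an inference for the reader's convenience only - the authoritative definition lives in NBody.h. */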
/* Whether the following three pointers are host pointers or device pointers will depend on the operation mode */
float* out_x; // Pointer to store the new `x` coordinate of each body before updating in sync after loops complete
float* out_y; // Pointer to store the new `y` coordinate of each body before updating in sync after loops complete
float* activity_map; // Pointer to flattened array of D*D float values storing normalised particle density values in a 2D grid
/* Device pointers */
nbody_soa* d_nbodies; // Device pointer for nbody data
/* Device Functions and Kernels */
__device__ void swap_float_pointers(float** p1, float** p2) {
// Function arguments are always passed by value, so to swap two pointers, we must pass references to those pointers
// The arguments `p1` and `p2` are actually addresses of pointers to `float` data (rather than the pointers themselves)
float* temp = *p1; // Set `temp` to be the pointer referenced by p1
*p1 = *p2; // Overwrite the pointer addressed by `p1` with the pointer addressed by `p2`
*p2 = temp; // Overwrite the pointer addressed by `p2` with the pointer addressed by `temp` (originally addressed by `p1`)
}
__global__ void simulation_kernel(nbody_soa * nbody_in, float * new_x, float * new_y, float * activity, const unsigned int N, const unsigned int D) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // Iterating over bodies in the Nbody system, one thread per body
if (i < N) { // One unique index for each body and any leftover threads stay idle
float ax = 0, ay = 0; // Initialise resultant acceleration to zero
// Read position data from global/constant/texture memory to thread-local stack variables
float local_xi = nbody_in->x[i];
float local_yi = nbody_in->y[i];
// Calculate the acceleration of body `i` due to gravitational force from the other bodies
for (unsigned int j = 0; j < N; j++) {
if (j == i) { // Skip the calculation when i = j
continue;
}
// Calculate displacement from particle `i` to particle `j`, since common expression in force equation
float x_ji = nbody_in->x[j] - local_xi;
float y_ji = nbody_in->y[j] - local_yi;
// Calculate distance from `i` to `j` with softening factor since used in denominator of force expression
// Single precision square root: https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html
float dist_ij = sqrtf(x_ji * x_ji + y_ji * y_ji + eps_sq);
/* Add unscaled contribution to acceleration due to gravitational force of `j` on `i`
Universal Gravitation: `F_ij = G * m_i * m_j * r_ji / |r_ji|^3` ; Newton's 2nd Law: F_i = m_i * a_i */
ax += nbody_in->m[j] * x_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
ay += nbody_in->m[j] * y_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
/* It would be possible to add force/acceleration contributions to `d_nbodies->v` directly within this inner loop.
However this would cause this function to be bound by memory access latency (repeated writes to `d_nbodies->v`).
Therefore we use the temporary/local variables `ax` and `ay` instead */
}
/* Use current velocity, acceleration to calculate position, velocity at next time step, respectively. */
float local_vxi = nbody_in->vx[i];
float local_vyi = nbody_in->vy[i];
// Care has to be taken about the order of execution to ensure the output positions are calculated correctly
// Use current velocity to calculate next position
local_xi += local_vxi * dt;
local_yi += local_vyi * dt;
// Now the local position variables hold the new positions and can be used to update the activity map
// Use current acceleration (based on current positions) to calculate the new velocity
// Scale `ax`, `ay` by gravitational constant `G`. See `NBody.h` for definition and comment.
nbody_in->vx[i] = local_vxi + G * ax * dt; // Write the new velocity back to `d_nbodies->vx[i]`
nbody_in->vy[i] = local_vyi + G * ay * dt; // Write the new velocity back to `d_nbodies->vy[i]`
// We can update particle velocities in-place without adversely affecting subsequent iterations/other threads
// Write the new position of particle `i` to the output buffers to avoid interfering with other threads
new_x[i] = local_xi;
new_y[i] = local_yi;
// Pointer swapping of position data occurs outside of the kernel launch in the `step_CUDA` function
// Update the activity map - a flat array of D*D float values storing normalised particle density values in a 2D grid
// First check whether the new position of particle `i` is within the activity grid [0,1)^{2}. Branching thread logic.
if ((local_xi >= 0) && (local_xi < 1) && (local_yi >= 0) && (local_yi < 1)) {
// If so, calculate the index of the grid element that particle `i` is in
// Multiply position vector by `D` then truncate components to `int` to find position in \{0,...,D-1\}^{2} grid
unsigned int index = D * (int)(D * local_yi) + (int)(D * local_xi); // Linearize the index from 2D grid into 1D array
// Increase the associated histogram bin by the normalised quantity `D/N` (scaling by D to increase brightness)
// Can result in race condition as multiple threads could increment at once. Could solve with `atomicAdd`
activity[index] += (float) D / N;
// Unfortunately this is a random access (write) to global memory and cannot easily be coalesced
/* We choose not to reduce the number of multiplication/division operations by incrementing the histogram bin by one
at this step and then scaling the histogram counts in a separate loop (as in the other implementations) in order to
avoid launching a separate grid/kernel with D^2 threads, thus reducing the number of kernel launches */
}
}
}
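/* Illustrative sketch only (not called anywhere): the comments inside the kernel above note that the unsynchronised
`activity[index] += ...` update can race when two bodies land in the same grid cell, and that `atomicAdd` would solve it.
A hypothetical helper such as the one below - its name and signature are assumptions made for this sketch - shows what
the safe variant of that update would look like. */
__device__ void update_activity_atomic(float* activity, const float x, const float y, const unsigned int N, const unsigned int D) {
if ((x >= 0) && (x < 1) && (y >= 0) && (y < 1)) { // Only particles inside the [0,1)^2 grid contribute
const unsigned int index = D * (unsigned int)(D * y) + (unsigned int)(D * x); // Linearise the 2D bin index into the flat array
atomicAdd(&activity[index], (float)D / N); // Atomic read-modify-write serialises conflicting increments, avoiding lost updates
}
}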
/* For information on how to parse command line parameters, see http://www.cplusplus.com/articles/DEN36Up4/
`argc` is the count of the command arguments, and `argv` is an array (of length `argc`) of the arguments.
The first argument is always the executable name (including path) */
int main(const int argc, const char *argv[]) {
/* Process the command line arguments */
switch (argc) {
case 4: // No optional flags used
parseNDM(&argv[1]);
break;
case 6: // One optional flag and argument used
parse_one_option(&argv[4]);
parseNDM(&argv[1]);
break;
case 8: // Two optional flags with arguments used
parse_two_options(&argv[4]);
parseNDM(&argv[1]);
break;
default: // The expected arguments are: "nbody.exe N D M [-i I] [-f input_file]"
fprintf(stderr, "Error: Unexpected number of arguments. %d arguments (including executable name) received\n", argc);
print_help();
exit(EXIT_FAILURE);
}
// Declare a function pointer to a simulation step function and set its value according to the operation mode `M`
void (*simulate)(void) = NULL; // Declare `simulate` as pointer to function (void) returning void
switch (M) {
case CPU:
simulate = &step_serial;
break;
case OPENMP:
simulate = &step_OpenMP;
printf("OpenMP using %d threads\n", omp_get_max_threads());
break;
case CUDA:
simulate = &step_CUDA;
break;
}
/* Allocate Heap Memory */
// Calculate memory requirements
const unsigned int data_column_size = sizeof(float) * N;
const unsigned int activity_grid_size = sizeof(float) * D * D;
// Memory allocation. See http://www.cplusplus.com/reference/cstdlib/malloc/
h_nbodies = (nbody_soa*)malloc(sizeof(nbody_soa));
h_nbodies->x = (float*)malloc(data_column_size);
h_nbodies->y = (float*)malloc(data_column_size);
// Allocates memory block for length N array of floats, and initialize all bits to zero (for default zero initial velocity).
// See http://www.cplusplus.com/reference/cstdlib/calloc/
h_nbodies->vx = (float*)calloc(N, sizeof(float)); // Zero initial velocity
h_nbodies->vy = (float*)calloc(N, sizeof(float)); // Zero initial velocity
h_nbodies->m = (float*)malloc(data_column_size);
if ((h_nbodies == NULL) || (h_nbodies->x == NULL) || (h_nbodies->y == NULL) || (h_nbodies->vx == NULL) || (h_nbodies->vy == NULL) || (h_nbodies->m == NULL)) {
fprintf(stderr, "Error allocating host memory (`h_nbodies`) for system with %d bodies\n", N);
exit(EXIT_FAILURE);
}
if (M == CUDA) {
/* Allocate device memory */
cudaMalloc((void**)&d_nbodies, sizeof(nbody_soa));
cudaMalloc((void**)&d_nbodies->x, data_column_size);
cudaMalloc((void**)&d_nbodies->y, data_column_size);
cudaMalloc((void**)&d_nbodies->vx, data_column_size);
cudaMalloc((void**)&d_nbodies->vy, data_column_size);
cudaMalloc((void**)&d_nbodies->m, data_column_size);
cudaMalloc((void**)&out_x, data_column_size);
cudaMalloc((void**)&out_y, data_column_size);
cudaMalloc((void**)&activity_map, activity_grid_size);
checkCUDAError("Memory allocation on device with cudaMalloc");
}
else { // Whether `out_x`, `out_y`, and `activity_map` are pointers on the host or device depends on operation mode
out_x = (float*)malloc(data_column_size);
out_y = (float*)malloc(data_column_size);
if ((out_x == NULL) || (out_y == NULL)) {
fprintf(stderr, "Error allocating host memory (output position buffers) for system with %d bodies\n", N);
exit(EXIT_FAILURE);
}
activity_map = (float*)malloc(activity_grid_size);
if (activity_map == NULL) {
fprintf(stderr, "Error allocating host memory (`activity map`) for system with %d bodies, activity grid size %d\n", N, D);
exit(EXIT_FAILURE);
}
}
/* Read initial data from file to host memory, or generate random initial state according to optional program flag `-f`. */
if (f_flag == 0) { // No input file specified, so a random initial N-body state will be generated
const float one_over_N = (float)1 / N; // Store the inverse of `N` as a constant to avoid recalculating in loop
for (unsigned int i = 0; i < N; i++) {
h_nbodies->x[i] = (float)rand() / RAND_MAX; // Random position in [0,1]
h_nbodies->y[i] = (float)rand() / RAND_MAX; // Random position in [0,1]
h_nbodies->m[i] = one_over_N; // Mass distributed equally among N bodies
// Note that velocity data has already been initialized to zero for all bodies
}
}
else { // Attempt to read initial N-body system state from input csv file to host memory
read_nbody_file(argv[f_flag], N);
}
if (M == CUDA) {
/* Copy the host input values in `h_nbodies` to the device memory `d_nbodies`. */
cudaMemcpy(d_nbodies->x, h_nbodies->x, data_column_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_nbodies->y, h_nbodies->y, data_column_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_nbodies->vx, h_nbodies->vx, data_column_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_nbodies->vy, h_nbodies->vy, data_column_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_nbodies->m, h_nbodies->m, data_column_size, cudaMemcpyHostToDevice);
checkCUDAError("Input transfer to device");
}
/* According to the value of program argument `I` either configure and start the visualiser,
or perform a fixed number of simulation steps and output the timing results. */
if (I == 0) { // Run visualiser when number of iterations not specified with `-i` flag, or otherwise `I` was set to 0
initViewer(N, D, M, simulate); // The simulation step function has been set earlier according to operation mode `M`
// Set where the visualiser will check for particle position data after each iteration
if (M == CUDA) {
setNBodyPositions(d_nbodies); // Device pointer
}
else {
setNBodyPositions(h_nbodies); // Host pointer
}
setActivityMapData(activity_map); // This is where the visualiser will check for activity data after each iteration
startVisualisationLoop();
}
else { // Run the simulation for `I` iterations and output the timing results
switch (M) { // Simulation and timing methods depend on operation mode
case CPU: { // Braces give each case its own scope so the local timing variables below can be declared and initialised
clock_t t; // Clock ticks for serial CPU timing
double seconds = 0; // Variable to hold execution timing results
t = clock(); // Starting timestamp. See http://www.cplusplus.com/reference/ctime/clock/
for (unsigned int i = 0; i < I; i++) {
step_serial();
}
t = clock() - t; // Take end timestamp and calculate difference from start in clock ticks
seconds = (double)t / CLOCKS_PER_SEC;
printf("Execution time %d seconds %d milliseconds for %d iterations\n", (int)seconds, (int)((seconds - (int)seconds) * 1000), I);
break;
}
case OPENMP: {
double start, end; // Timestamps for OpenMP timing
double seconds = 0; // Variable to hold execution timing results
start = omp_get_wtime(); // Starting timestamp. See https://www.openmp.org/spec-html/5.0/openmpsu160.html
for (unsigned int i = 0; i < I; i++) {
step_OpenMP();
}
end = omp_get_wtime();
seconds = end - start;
printf("Execution time %d seconds %d milliseconds for %d iterations\n", (int)seconds, (int)((seconds - (int)seconds) * 1000), I);
break;
}
case CUDA: {
cudaEvent_t cuda_start, cuda_stop; // CUDA Event timers
float milliseconds = 0; // Timing results variable (must be `float` type for call to `cudaEventElapsedTime`)
cudaEventCreate(&cuda_start); cudaEventCreate(&cuda_stop); // Create CUDA Events
cudaEventRecord(cuda_start); // Record the start time before calling the kernel launching simulation function
for (unsigned int i = 0; i < I; i++) {
step_CUDA();
}
cudaEventRecord(cuda_stop); // Record the stop time once the simulation has finished
cudaEventSynchronize(cuda_stop); // Ensure stop time has finished recording before continuing
checkCUDAError("Error running simulation kernel\n");
cudaEventElapsedTime(&milliseconds, cuda_start, cuda_stop); // Write the elapsed time to `milliseconds`
printf("Execution time %d seconds %d milliseconds for %d iterations\n", (int)milliseconds / 1000, (int)milliseconds % 1000, I);
cudaEventDestroy(cuda_start); cudaEventDestroy(cuda_stop); // Cleanup CUDA Event timers
/* Copy the device output values in `d_nbodies` to the host memory `h_nbodies` then write to file for validation.
cudaMemcpy(h_nbodies->x, d_nbodies->x, data_column_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_nbodies->y, d_nbodies->y, data_column_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_nbodies->vx, d_nbodies->vx, data_column_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_nbodies->vy, d_nbodies->vy, data_column_size, cudaMemcpyDeviceToHost);
checkCUDAError("Copying final Nbody system state from device to host");
*/
break;
}
}
}
// Cleanup
if (M == CUDA) {
cudaFree(d_nbodies->x);
cudaFree(d_nbodies->y);
cudaFree(d_nbodies->vx);
cudaFree(d_nbodies->vy);
cudaFree(d_nbodies->m);
cudaFree(d_nbodies);
cudaFree(out_x);
cudaFree(out_y);
cudaFree(activity_map);
checkCUDAError("Freeing memory from device with cudaFree");
}
else { // Whether `out_x`, `out_y`, and `activity_map` are pointers on the host or device depends on operation mode
free(out_x);
free(out_y);
free(activity_map);
}
free(h_nbodies->x);
free(h_nbodies->y);
free(h_nbodies->vx);
free(h_nbodies->vy);
free(h_nbodies->m);
free(h_nbodies);
return 0;
}
/* Functions to perform the main simulation of the Nbody system by updating the state by one time step */
// Serial CPU version
void step_serial(void) {
/* The index `i` is used to iterate over the `N` bodies in the system. For each body `i`, we choose to calculate the
`N-1` interactions of the other bodies `j != i` on `i`, as opposed to the action of `i` on all of the other bodies `j != i`.
When computed in parallel, the former avoids a synchronisation step so that the velocity of each body `i`
can be updated independently of the other threads, reducing idle time. Afterwards, we can also update
the position of body `i` and calculate which histogram bin/activity grid cell the body `i` is in, all within one loop.
This is known as loop jamming or loop fusion. See http://www.it.uom.gr/teaching/c_optimization/tutorial.html
Calculating the histogram contribution of each body is far more efficient than iterating over histogram bins/grid cells
since we exploit the fact that each body can only be in at most one grid cell at a time (D*D times fewer calculations). */
unsigned int i, j; // Counter variables
float ax, ay; // Components of resultant acceleration of a particle as a result of gravitational force
float local_xi, local_yi; // Local position variables to reduce global memory accesses, especially during inner loop
float local_vxi, local_vyi; // Local velocity variables to exchange two global memory reads for one plus two local reads
float x_ji, y_ji; // Components of displacement vector from particle `i` to particle `j`
float dist_ij; // To hold softened distance `sqrt(|r_{ji}|^{2} + eps^{2})` from `i` to `j`
// Reset histogram values to zero with `memset`. See http://www.cplusplus.com/reference/cstring/memset/
memset(activity_map, 0, sizeof(float) * D * D); // Zero all D*D bins (sizeof the pointer itself would clear only the first few bytes)
for (i = 0; i < N; i++) { // Iterating over bodies in the Nbody system
ax = 0; // Reset resultant acceleration in `x` direction to zero for new particle
ay = 0; // Reset resultant acceleration in `y` direction to zero for new particle
// Read position data from global memory to the stack
local_xi = h_nbodies->x[i];
local_yi = h_nbodies->y[i];
for (j = 0; j < N; j++) {
if (j == i) { // Skip the calculation when i = j (saves calculation time; could consider branching effects on GPU)
continue;
}
// Calculate displacement from particle `i` to particle `j`, since common expression in force equation
// Using local variables for `x[i]`, `y[i]` here removes a global memory read from each inner loop iteration
x_ji = h_nbodies->x[j] - local_xi;
y_ji = h_nbodies->y[j] - local_yi;
// Calculate distance from `i` to `j` with softening factor since used in denominator of force expression
// Explicit casting required since `sqrt` function expects `double` type input and output; operation execution order
dist_ij = (float)sqrt((double)x_ji * x_ji + (double)y_ji * y_ji + eps_sq);
/* Add unscaled contribution to acceleration due to gravitational force of `j` on `i`
Universal Gravitation: `F_ij = G * m_i * m_j * r_ji / |r_ji|^3` ; Newton's 2nd Law: F_i = m_i * a_i
See top of file for further explanation of calculation, physical background */
ax += h_nbodies->m[j] * x_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
ay += h_nbodies->m[j] * y_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
/* It would be possible to add force/acceleration contributions to `h_nbodies->v` directly within this inner loop.
However this would cause this function to be bound by memory access latency (repeated writes to `h_nbodies->v`).
Therefore we use the temporary/local variables `ax` and `ay` instead */
}
/* Use current velocity, acceleration to calculate position, velocity at next time step, respectively. */
/* Former code used extra heap memory buffers for velocity as well, adding extra pointer-swapping steps and using more memory.
However that implementation scores highly for readability, as it makes the intended outcome clear (no race conditions)
out_x[i] = h_nbodies->x[i] + h_nbodies->vx[i] * dt;
out_y[i] = h_nbodies->y[i] + h_nbodies->vy[i] * dt;
out_vx[i] = h_nbodies->vx[i] + G * ax * dt;
out_vy[i] = h_nbodies->vy[i] + G * ay * dt; */
// Using local velocity variables also reduces global memory reads, but only marginally compared to `local_xi`, `local_yi`
local_vxi = h_nbodies->vx[i];
local_vyi = h_nbodies->vy[i];
// More care has to be taken about the order of execution to ensure the output positions are calculated correctly
// Use current velocity to calculate next position
local_xi += local_vxi * dt;
local_yi += local_vyi * dt;
// Now the local position variables hold the new positions and can be used to update the activity map
// Use current acceleration (based on current positions) to calculate the new velocity
// Scale `ax`, `ay` by gravitational constant `G`. See `NBody.h` for definition and comment.
h_nbodies->vx[i] = local_vxi + G * ax * dt; // Write the new velocity back to `h_nbodies->vx[i]`
h_nbodies->vy[i] = local_vyi + G * ay * dt; // Write the new velocity back to `h_nbodies->vy[i]`
// We can update particle velocities in-place without adversely affecting subsequent iterations/other threads
// Update the activity map - a flat array of D*D float values storing normalised particle density values in a 2D grid
// First check whether the new position of particle `i` is within the activity grid [0,1)^{2}
// if ((out_x[i] >= 0) && (out_x[i] < 1) && (out_y[i] >= 0) && (out_y[i] < 1)) {
if ((local_xi >= 0) && (local_xi < 1) && (local_yi >= 0) && (local_yi < 1)) {
// If so, calculate the index of the grid element that particle `i` is in and increment the associated histogram bin
// Multiply position vector by `D` then truncate components to `int` to find position in \{0,...,D-1\}^{2} grid
// Can result in race condition when outer `i` loop parallel as multiple threads could increment at once
// Possible solutions: Critical section; atomic operator; move section outside parallel loop (barrier/master method)
activity_map[D * (int)(D * local_yi) + (int)(D * local_xi)]++; // Linearize the index from 2D grid into 1D array
}
// Write the new position of particle `i` to the output buffers to avoid interfering with subsequent iterations
out_x[i] = local_xi;
out_y[i] = local_yi;
}
// Scale activity map values by `D / N` to normalize the histogram values and then scale by D to increase brightness
const float one_over_N = (float)1 / N; // Store the inverse of global variable `N` as a constant to cache value
for (i = 0; i < D * D; i++) {
activity_map[i] *= one_over_N * D;
}
/* Finally, update the `nbody` data pointers to reference the newly calculated arrays of position data.
We swap the input and output pointers rather than simply overwriting the input pointers because that would result
in losing the original input pointers, losing allocated heap memory addresses and causing a memory leak! */
float* temp; // Declare a temporary pointer to `float` to hold addresses whilst swapping the input and output pointers
temp = h_nbodies->x; // Keep track of the old input pointer for later use so we don't lose any allocated memory
h_nbodies->x = out_x; // Update the `h_nbodies->x` pointer which is used for visualisation, and the next `step` iteration
out_x = temp; // Reset `out_x` to a 'fresh', 'empty' piece of memory
temp = h_nbodies->y; // Keep track of the old input pointer for later use so we don't lose any allocated memory
h_nbodies->y = out_y; // Update the `h_nbodies->y` pointer which is used for visualisation, and the next `step` iteration
out_y = temp; // Reset `out_y` to a distinct piece of 'fresh' and 'empty' memory
}
/* Profiling with Visual Studio's Diagnostic Tools and PerfTips by setting breakpoints to time code segments and using
Debug->Windows->Show Diagnostic Tools https://docs.microsoft.com/en-us/visualstudio/profiling/profiling-feature-tour?view=vs-2019
Shows that as `N` increases the majority of time spent running the programme is spent calling the simulation step function,
and within that the loop over `N` particles (indexed by `i`) occupies most of the compute time rather than the loop over
the activity grid cells. This makes sense since there are far more compute steps within the `i` loop, and generally D will be
much smaller than `N` and is effectively limited in visualisation by screen resolution.
Therefore it is most important to parallelise the outer `i` loop. Further analysis suggests that as `N` increases further,
the majority of time spent within each outer loop is spent iterating over the inner `j` loop of interactions between particles,
so nested parallel loops should also be considered. Amongst all the operations/function calls within each simulation step,
it appears that the call to the `sqrt` function in the inner loop is the most expensive. */
// OpenMP version (For parallel computation on a multicore CPU)
/* Benchmarking results for parallelising outer loop over `i` (on my 4 core personal laptop)
Command Line Arguments | Histogram Race Handling | Scheduling | Execution Time(s)
"16384 16 CPU -i 10" | N/A | N/A | 61.357s, 61.988s
"16384 16 OPENMP -i 10" | Atomic | schedule(static) | 17.184s, 17.624s, 17.713s
"16384 16 OPENMP -i 10" | Atomic | schedule(static, 1) | 17.314s, 17.399s
"16384 16 OPENMP -i 10" | Atomic | schedule(static, 2) | 17.182s, 17.478s
"16384 16 OPENMP -i 10" | Atomic | schedule(static, 4) | 17.225s, 17.363s
"16384 16 OPENMP -i 10" | Atomic | schedule(static, 8) | 17.248s, 17.446s
"16384 16 OPENMP -i 10" | Atomic | schedule(guided) | 15.769s, 16.650s, 16.830s, 17.110s
"16384 16 OPENMP -i 10" | Atomic | schedule(dynamic) | 11.895s, 12.028s, 12.210s
"16384 16 OPENMP -i 10" | Atomic | schedule(dynamic, 2) | 11.841s, 11.997s, 12.266s
"16384 16 OPENMP -i 10" | Atomic | schedule(dynamic, 4) | 11.766s, 12.047s, 12.210s
"16384 16 OPENMP -i 10" | Critical | schedule(static, 4) | 17.341s, 18.007s
"16384 16 OPENMP -i 10" | Critical | schedule(guided) | 16.720s, 16.994s
"16384 16 OPENMP -i 10" | Critical | schedule(dynamic) | 11.617s, 11.888s, 11.999s, 13.054s
"8192 16 CPU -i 100" | N/A | N/A | 153.307s, 154.100s
"8192 16 OPENMP -i 100" | Atomic | schedule(dynamic) | 29.687s, 30.332s
"8192 16 OPENMP -i 100" | Critical | schedule(dynamic) | 28.490s, 30.305s
"2048 16 CPU -i 1000" | N/A | N/A | 95.132s, 95.174s, 95.677s
"2048 16 OPENMP -i 1000" | Atomic | schedule(static) | 29.240s, 29.584s
"2048 16 OPENMP -i 1000" | Atomic | schedule(static, 4) | 29.685s, 30.073s
"2048 16 OPENMP -i 1000" | Atomic | schedule(guided) | 29.357s, 29.521s
"2048 16 OPENMP -i 1000" | Atomic | schedule(dynamic) | 17.910s, 18.774s
"2048 16 OPENMP -i 1000" | Critical | schedule(dynamic) | 18.470s, 18.577s
"512 16 CPU -i 10000" | N/A | N/A | 59.003s, 59.404s
"512 16 OPENMP -i 10000" | Atomic | schedule(dynamic) | 12.734s, 13.735s
"256 16 CPU -i 100000" | N/A | N/A | 148.552s, 149.194s
"256 16 OPENMP -i 100000" | Atomic | schedule(static, 4) | 47.987s, 48.889s
"256 16 OPENMP -i 100000" | Atomic | schedule(dynamic) | 34.568s, 36.429s
"128 16 CPU -i 100000" | N/A | N/A | 37.436s, 37.520s
"128 16 OPENMP -i 100000" | Atomic | schedule(static, 4) | 12.587s, 13.110s
"128 16 OPENMP -i 100000" | Atomic | schedule(guided) | 12.448s, 12.541s
"128 16 OPENMP -i 100000" | Atomic | schedule(dynamic) | 11.717s, 12.071s
"64 16 CPU -i 1000000" | N/A | N/A | 92.477s, 92.617s
"64 16 OPENMP -i 1000000" | Atomic | schedule(static, 4) | 31.302s, 32.235s
"64 16 OPENMP -i 1000000" | Atomic | schedule(guided) | 32.292s, 33.169s
"64 16 OPENMP -i 1000000" | Atomic | schedule(dynamic) | 38.062s, 38.133s */
/* Initial Remarks on parallelising outer loop
The data shows that dynamic scheduling is faster for values of `N` greater than 100 or so, but slower than static scheduling for
smaller values of `N`, with guided scheduling always performing between static and dynamic scheduling and never optimally.
This is because there is uneven workload amongst threads, which favours dynamic scheduling, but the overhead cost of dynamic
scheduling at runtime becomes a limiting factor for relatively small parallel loops.
A trend of increasing OpenMP performance relative to serial CPU performance as `N` increases can also be seen as the benefits
of parallelism outweigh their overhead costs.
I believe the main source of difference in workload between threads arises from whether the particle `i` lies within
the activity grid or not. If so, a slow (atomic/critical/serial) incrementation of an activity grid cell must occur, which also
involves writing to global memory at an index of the `activity_map` array that cannot be predicted at compile time, but if
the particle `i` lies outside the activity grid this step can be skipped, causing an uneven workload between different threads.
Scheduling approach seems to have a more important impact on performance than chunk size.
On the other hand, it appears that there's no major difference in performance between using a critical section or an atomic
directive to ensure the safe incrementation of the activity grid histogram, perhaps with only a slight leaning towards atomic.
For reference information on the OMP Atomic directive, see the following links:
https://www.openmp.org/spec-html/5.0/openmpsu95.html
https://www.ibm.com/support/knowledgecenter/SSGH2K_13.1.2/com.ibm.xlc131.aix.doc/compiler_ref/prag_omp_atomic.html */
/* Benchmarking results for parallelising inner loop over `j` only (on my 4 core personal laptop)
Command Line Arguments | Acceleration Sum Handling | Scheduling | Execution Time(s)
"8192 16 OPENMP -i 10" | Two Atomic Directives | schedule(static) | 54.309s
"8192 16 OPENMP -i 10" | Two Atomic Directives | schedule(dynamic) | 49.997s
"8192 16 OPENMP -i 10" | Critical Section | schedule(static) | 54.520s
"8192 16 OPENMP -i 10" | Critical Section | schedule(dynamic) | 61.185s
"256 16 OPENMP -i 10000" | Two Atomic Directives | schedule(static) | 57.011s
"256 16 OPENMP -i 10000" | Two Atomic Directives | schedule(static, 4) | 64.039s
"256 16 OPENMP -i 10000" | Two Atomic Directives | schedule(dynamic) | 90.021s
"256 16 OPENMP -i 10000" | Critical Section | schedule(static) | 58.980s
"256 16 OPENMP -i 10000" | Critical Section | schedule(dynamic) | 79.218s */
/* Remarks on parallelising inner loop
The data shows that only parallelising the inner `j` loop over force interactions results in a 3-4x slowdown
compared to the serial CPU version. This is because of repeated overheads setting up small parallel loops within a larger loop.
The story might be different on a machine with more cores (e.g. 16 cores rather than 4 cores), but when compared to the
3-4x speedup over serial implementation from parallelising the outer `i` loop over bodies in the system, it is clear which is
preferred. As a general rule, outer loops should be parallelised first (assuming they run for a reasonable number of iterations).
Final remarks and conclusion
Finally, through testing an implementation of nested parallel loops we find the following performance hierarchy for the given
problem: Parallel outer loop > Serial CPU version > Parallel histogram scaling > Nested parallel loops > Parallel inner loop.
For **nested** parallel loops with dynamic scheduling and atomic directives to avoid race conditions when
1) Incrementing activity map contributions; and 2) Summing acceleration contributions with the inner `j` loop parallel;
given command line arguments "8192 16 OPENMP -i 100", an execution time of 268.809s was recorded, about 75% slower than serial.
In conclusion, we choose to parallelise the force calculation outer loop over `i` iterating over bodies in the system as it
is the best and only loop parallelisation which improves on the serial CPU version (by a respectable 3-6x speedup).
We choose dynamic scheduling since it outperforms static and guided scheduling for values of `N` greater than around 100, where
many feasible values of `N` lie (a separate parallel directive to choose static scheduling when N < 100 could be considered).
Finally, to avoid race conditions when each thread uses the position of its local particle to update the shared `activity_map`
histogram, we choose to use an atomic directive, though this only appears to be negligibly better than a critical section. */
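/* Hedged sketch (not called anywhere): the remarks above and a comment inside `step_OpenMP` mention that a reduction
clause could replace atomics on `ax`/`ay` if the inner `j` loop were made parallel. The helper below - its name and
signature are assumptions made purely for illustration - shows the inner loop of that nested-parallel variant written
with `reduction(+: ax, ay)`; it mirrors the arithmetic of the main implementations for a single body `i`. */
static void inner_loop_reduction_sketch(const int i, const float local_xi, const float local_yi, float* ax_out, float* ay_out) {
int j;
float ax = 0, ay = 0;
#pragma omp parallel for private(j) reduction(+: ax, ay) schedule(static)
for (j = 0; j < (int)N; j++) {
if (j == i) continue; // Skip self-interaction, as in the main loops
float x_ji = h_nbodies->x[j] - local_xi;
float y_ji = h_nbodies->y[j] - local_yi;
float dist_ij = (float)sqrt((double)x_ji * x_ji + (double)y_ji * y_ji + eps_sq);
ax += h_nbodies->m[j] * x_ji / (dist_ij * dist_ij * dist_ij); // Reduction clause sums per-thread partials safely
ay += h_nbodies->m[j] * y_ji / (dist_ij * dist_ij * dist_ij);
}
*ax_out = G * ax; // Scale by the gravitational constant, as in the main implementations
*ay_out = G * ay;
}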
void step_OpenMP(void) {
/* The index `i` is used to iterate over the `N` bodies in the system. For each body `i`, we choose to calculate the
`N-1` interactions of the other bodies `j != i` on `i`, as opposed to the action of `i` on all of the other bodies `j != i`.
This is because the latter requires an extra synchronisation step before the velocity of each body `i` can be calculated,
increasing thread idle time. Subsequently, we can also update the position of body `i` and calculate its activity grid
position within the same parallel loop, reducing overhead. This is known as loop jamming or loop fusion.
See http://www.it.uom.gr/teaching/c_optimization/tutorial.html
Calculating the histogram contribution of each body is far more efficient than iterating over histogram bins/grid cells
since we exploit the fact that each body can only be in at most one grid cell at a time (D*D times fewer calculations). */
int i, j; // Counter variables. OpenMP requires these to be `int` type rather than unsigned
float ax, ay; // Components of resultant acceleration of a particle as a result of gravitational force
float local_xi, local_yi; // Local position variables to reduce global memory accesses, especially during inner loop
float local_vxi, local_vyi; // Local velocity variables to exchange two global memory reads for one plus two local reads
float x_ji, y_ji; // Components of displacement vector from particle `i` to particle `j`
float dist_ij; // To hold softened distance `sqrt(|r_{ji}|^{2} + eps^{2})` from `i` to `j`
// Reset histogram values to zero with `memset`. See http://www.cplusplus.com/reference/cstring/memset/
memset(activity_map, 0, sizeof(float) * D * D); // Zero all D*D bins (sizeof the pointer itself would clear only the first few bytes)
//omp_set_nested(1);
#pragma omp parallel for default(none) private(i, j, ax, ay, local_xi, local_yi, x_ji, y_ji, dist_ij, local_vxi, local_vyi) shared(h_nbodies, activity_map, N, D, out_x, out_y) schedule(dynamic)
for (i = 0; i < N; i++) { // Iterating over bodies in the Nbody system
ax = 0; // Reset resultant acceleration in `x` direction to zero for new particle
ay = 0; // Reset resultant acceleration in `y` direction to zero for new particle
// Read position data from global memory to the stack
local_xi = h_nbodies->x[i];
local_yi = h_nbodies->y[i];
// Can treat `i` as a shared variable on the inner `j` loop since we read without changing within each outer loop iteration
// Otherwise could use `firstprivate(i)` declaration to pass in the value to each thread
//#pragma omp parallel for default(none) private(j, x_ji, y_ji, dist_ij) shared(i, ax, ay, local_xi, local_yi, h_nbodies) schedule(dynamic)
for (j = 0; j < N; j++) {
if (j == i) { // Skip the calculation when i = j (saves calculation time; could consider branching effects on GPU)
continue;
}
// Calculate displacement from particle `i` to particle `j`, since common expression in force equation
// Using local variables for `x[i]`, `y[i]` here removes a global memory read from each inner loop iteration
x_ji = h_nbodies->x[j] - local_xi;
y_ji = h_nbodies->y[j] - local_yi;
// Calculate distance from `i` to `j` with softening factor since used in denominator of force expression
// Explicit casting required since `sqrt` function expects `double` type input and output; operation execution order
dist_ij = (float)sqrt((double)x_ji * x_ji + (double)y_ji * y_ji + eps_sq);
/* Add unscaled contribution to acceleration due to gravitational force of `j` on `i`
Universal Gravitation: `F_ij = G * m_i * m_j * r_ji / |r_ji|^3` ; Newton's 2nd Law: F_i = m_i * a_i
See top of file for further explanation of calculation, physical background */
// If the inner `j` loop is parallel, adding to `ax[i]` will result in a race condition.
// Could try a reduction directive for `ax`, `ay` in the parallel inner loop directive if supported by OpenMP 2.0
//#pragma omp critical {
ax += h_nbodies->m[j] * x_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
ay += h_nbodies->m[j] * y_ji / (dist_ij * dist_ij * dist_ij); // Need to scale by `G` later
//}
/* It would be possible to add force/acceleration contributions to `h_nbodies->v` directly within this inner loop.
However this would cause this function to be bound by memory access latency (repeated writes to `h_nbodies->v`).
Therefore we use the temporary/local variables `ax` and `ay` instead */
}
/* Use current velocity, acceleration to calculate position, velocity at next time step, respectively. */
/* Former code used extra heap memory buffers for velocity as well, adding extra pointer-swapping steps and using more memory.
However that implementation scores highly for readability, as it makes the intended outcome clear (no race conditions)
out_x[i] = h_nbodies->x[i] + h_nbodies->vx[i] * dt;
out_y[i] = h_nbodies->y[i] + h_nbodies->vy[i] * dt;
out_vx[i] = h_nbodies->vx[i] + G * ax * dt;
out_vy[i] = h_nbodies->vy[i] + G * ay * dt; */
// Using local velocity variables also reduces global memory reads, but only marginally compared to `local_xi`, `local_yi`
local_vxi = h_nbodies->vx[i];
local_vyi = h_nbodies->vy[i];
// More care has to be taken about the order of execution to ensure the output positions are calculated correctly
// Use current velocity to calculate next position
local_xi += local_vxi * dt;
local_yi += local_vyi * dt;
// Now the local position variables hold the new positions and can be used to update the activity map
// Use current acceleration (based on current positions) to calculate the new velocity
// Scale `ax`, `ay` by gravitational constant `G`. See `NBody.h` for definition and comment.
h_nbodies->vx[i] = local_vxi + G * ax * dt; // Write the new velocity back to `h_nbodies->vx[i]`
h_nbodies->vy[i] = local_vyi + G * ay * dt; // Write the new velocity back to `h_nbodies->vy[i]`
// We can update particle velocities in-place without adversely affecting subsequent iterations/other threads
// Update the activity map - a flat array of D*D float values storing normalised particle density values in a 2D grid
// First check whether the new position of particle `i` is within the activity grid [0,1)^{2}
// if ((out_x[i] >= 0) && (out_x[i] < 1) && (out_y[i] >= 0) && (out_y[i] < 1)) {
if ((local_xi >= 0) && (local_xi < 1) && (local_yi >= 0) && (local_yi < 1)) {
// If so, calculate the index of the grid element that particle `i` is in and increment the associated histogram bin
// Multiply position vector by `D` then truncate components to `int` to find position in \{0,...,D-1\}^{2} grid
// Can result in race condition when outer `i` loop parallel as multiple threads could increment at once
// Possible solutions: Critical section; atomic operator; move section outside parallel loop (barrier/master method)
/* Atomic operations can be used to safely increment a shared numeric value; critical regions have other uses too */
#pragma omp atomic
activity_map[D * (int)(D * local_yi) + (int)(D * local_xi)]++; // Linearize the index from 2D grid into 1D array
}
// Write the new position of particle `i` to the output buffers to avoid interfering with other threads/iterations
out_x[i] = local_xi;
out_y[i] = local_yi;
}
// Scale activity map values by `D / N` to normalize the histogram values and then scale by D to increase brightness
const float one_over_N = (float)1 / N; // Store the inverse of global variable `N` as a constant to cache value
/* Parallelising this histogram scaling loop actually has a negative impact on performance due to fork/join overheads
outweighing the small gains from parallelising a non-compute intensive loop. Using command line arguments (release mode)
"2048 1024 OPENMP -i 100" we reliably time 9.8 seconds for serial execution vs 11.5s-11.9s with this loop parallel
and using static or guided scheduling (chunk size has little effect) and 14.5s-14.9s for dynamic scheduling.
The reason dynamic scheduling is even slower than static scheduling is the extra runtime overheads of dynamic scheduling
where the workloads are extremely uniform (two multiplications per loop) */
//#pragma omp parallel for default(none) private(i) shared(activity_map, one_over_N, D) schedule(dynamic)
for (i = 0; i < D * D; i++) {
activity_map[i] *= one_over_N * D;
}
/* Finally, update the `nbody` data pointers to reference the newly calculated arrays of position data.
We swap the input and output pointers rather than simply overwriting the input pointers because that would result
in losing the original input pointers, losing allocated heap memory addresses and causing a memory leak! */
float* temp; // Declare a temporary pointer to `float` to hold addresses whilst swapping the input and output pointers
temp = h_nbodies->x; // Keep track of the old input pointer for later use so we don't lose any allocated memory
h_nbodies->x = out_x; // Update the `h_nbodies->x` pointer which is used for visualisation, and the next `step` iteration
out_x = temp; // Reset `out_x` to a 'fresh', 'empty' piece of memory
temp = h_nbodies->y; // Keep track of the old input pointer for later use so we don't lose any allocated memory
h_nbodies->y = out_y; // Update the `h_nbodies->y` pointer which is used for visualisation, and the next `step` iteration
out_y = temp; // Reset `out_y` to a distinct piece of 'fresh' and 'empty' memory
}
// CUDA version (for parallel computation on GPU)
void step_CUDA(void) {
/* This host function sets up kernel launch parameters and launches GPU kernel(s) to calculate one simulation step */
// First reset histogram values to zero with `cudaMemset`.
// See https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html for documentation
cudaMemset(activity_map, 0, sizeof(float) * D * D); // Zero all D*D bins on the device (sizeof the pointer would clear only the first few bytes)
// Prepare kernel launch parameters
// Ensure we have the minimum number of blocks needed for total threads to exceed `N`
unsigned int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
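// Worked example: with N = 1000 and THREADS_PER_BLOCK = 256 this gives (1000 + 255) / 256 = 4 blocks, i.e. 1024 threads of which 24 stay idle in the kernel's bounds check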
// Run the kernel
simulation_kernel << <blocks, THREADS_PER_BLOCK >> > (d_nbodies, out_x, out_y, activity_map, N, D);
//checkCUDAError("Error running simulation kernel\n");
/* New velocities and activity map data have been calculated in-place by the call to `simulation_kernel`, whilst new
position data has been written to the buffers `out_x`, `out_y`. We must update the `d_nbodies` data pointers accordingly. */
swap_float_pointers(&d_nbodies->x, &out_x);
swap_float_pointers(&d_nbodies->y, &out_y);
}
/* Functions for parsing Command Line Arguments
The expected arguments are: "nbody.exe N D M [-i I] [-f input_file]" */
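/* Example invocations (the csv file name below is purely hypothetical):
"nbody.exe 1024 16 CUDA -i 100" runs 100 timed CUDA iterations with 1024 randomly initialised bodies;
"nbody.exe 1024 16 CPU -f bodies.csv" reads the initial state from bodies.csv and starts the visualiser (no -i flag). */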
void print_help() {
printf("USAGE: \"nbody.exe N D M [-i I] [-f input_file]\", where\n");
printf(" N is the number of bodies to simulate.\n");
printf(" D is the integer dimension of the activity grid. The Grid has D*D locations.\n");
printf(" M is the operation mode, either `CPU` or `OPENMP`\n");
printf(" [-i I] [OPTIONAL] Specifies number `I` of simulation iterations to perform. Visualisation mode is used when `-i` flag not set.\n");
printf("[-f input_file] [OPTIONAL] Specifies an input file with an initial `N` bodies of data. A random initial state will be generated when `-f` flag not set.\n");
}
void parseNDM(const char* argv[3]) {
N = parse_str_as_uint(argv[0]);
checkLastError("Error parsing argument for `N` to `int`");
if (N == 0) {
fprintf(stderr, "Error: Argument \"%s\" for number of bodies `N` parsed as 0.\n", argv[0]);
print_help();
exit(EXIT_FAILURE);
}
D = parse_str_as_uint(argv[1]);
checkLastError("Error parsing argument for `D` to `int`");
if (strcmp(argv[2], "CPU") == 0) {
M = CPU;
}
else if (strcmp(argv[2], "OPENMP") == 0) {
M = OPENMP;
}
else if (strcmp(argv[2], "CUDA") == 0) {
M = CUDA;
}
else {
fprintf(stderr, "Error: Unexpected value %s for operation mode `M` (case sensitive).\n", argv[3]);
print_help();
exit(EXIT_FAILURE);
}
}
void parse_one_option(const char* options[2]) {
if (strcmp(options[0], "-i") == 0) {
I = parse_str_as_uint(options[1]);
checkLastError("Error parsing argument for `I` to `int`");
}
else if (strcmp(options[0], "-f") == 0) {
f_flag = 5;
}
else { // Invalid option flag
fprintf(stderr, "Error: Unexpected optional arguments/flags received.\n");
print_help();
exit(EXIT_FAILURE);
}
}
void parse_two_options(const char* options[4]) {
if ((strcmp(options[0], "-i") == 0) && (strcmp(options[2], "-f") == 0)) {
I = parse_str_as_uint(options[1]);
checkLastError("Error parsing argument for `I` to `int`");
f_flag = 7;
}
else if ((strcmp(options[0], "-f") == 0) && (strcmp(options[2], "-i") == 0)) {
I = parse_str_as_uint(options[3]);
checkLastError("Error parsing argument for `I` to `int`");
f_flag = 5;
}
else { // Invalid option flag combination
fprintf(stderr, "Error: Unexpected combination of optional arguments/flags received.\n");
print_help();
exit(EXIT_FAILURE);
}
}
unsigned int parse_str_as_uint(const char * str) {
if (isdigit(str[0]) == 0) { // In particular, this excludes leading minus sign/negative input values.
fprintf(stderr, "Error parsing %s as `int`: First char not decimal digit (negative values not permitted).\n", str);
print_help();
exit(EXIT_FAILURE);
}
unsigned int val; // To hold parsed `unsigned int` value
char* pEnd; // Pointer to first character after number in `str`
val = (unsigned int)strtol(str, &pEnd, 10); // Convert string to long integer in base 10. Set `pEnd`.
if (pEnd[0] != '\0') { // Check for extra characters in `str` after initial number (can include decimal point)
fprintf(stderr, "Error: Unexpected characters in string %s when parsing to `int`.\n", str);
print_help();
exit(EXIT_FAILURE);
}
return val;
}
/* Functions for reading input files */
void read_nbody_file(const char* filename, const int N) {
FILE* f; // Input file handle
char line_buffer[BUFFER_SIZE]; // Buffer to hold lines read from file
char* ptr_ch = NULL; // Pointer to track character position when reading `line_buffer` string
int line_number = 0; // Keep track of line number for error messaging
int body_count = 0; // Count of number of body data lines read to ensure it matches `N`
f = fopen(filename, "r"); // Open the file in read-only mode
if (f == NULL) {
fprintf(stderr, "Error opening file '%s' for reading\n", filename);
exit(EXIT_FAILURE);
}
/* Read file line by line with `fgets` function. See http://www.cplusplus.com/reference/cstdio/fgets/ for reference
Reads from file into buffer until (soonest of) either `\n` or `EOF` is read, or `BUFFER_SIZE-1` characters read */
while (fgets(line_buffer, BUFFER_SIZE, f) != NULL) {
line_number++; // Increment count of lines read
if (line_buffer[0] == '#') { // If first char in line is `#` skip to next line to ignore comments
continue;
}
if (line_buffer[strlen(line_buffer) - 1] != '\n') { // If last char read from file is not '\n', the line is too long
// This checks that a full line of data was written from file to buffer when not a comment line
fprintf(stderr, "Error reading line %u: Line length exceeds buffer size of %d characters\n", line_number, BUFFER_SIZE);
exit(EXIT_FAILURE);
}
/* Read the line of data into `h_nbodies`, using comma character `,` as delimiter to separate data values
This could be considered as an unrolled while loop over commas counted using `strchr` calls with nontrivial control flow
The use of `ptr_ch` as a separate variable from `line_buffer` could probably be removed. */
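/* Illustrative example of the format this parser expects (values made up): a data line such as
"0.25, 0.75, 0.0, 0.0, 0.001" supplies x, y, vx, vy, m in that order, while a line like "0.25, 0.75,,,"
leaves vx, vy at their default of zero and triggers the 1/N fallback for the missing mass. */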
ptr_ch = line_buffer; // Place `ptr_ch` at the start of the line to be read
/* Use `strchr` to search through the line starting at position `ptr_ch` to find the next comma `,` character
Returns `NULL` pointer if no comma `,` character found in line after position `ptr_ch`
See http://www.cplusplus.com/reference/cstring/strchr/ for reference */
if ((strchr(ptr_ch, ',') == NULL)) { // Check for comma after first data value
fprintf(stderr, "Error reading line %u: No data delimiters (`,`) detected\n", line_number);
exit(EXIT_FAILURE);
}
else { // This appears to be a valid data line. Don't write past memory bounds for `h_nbodies`!
if (body_count > N-1) { // Throw an error if we have already read `N` or more data rows
fprintf(stderr, "Error reading line %u: Num bodies in file exceeds input N (%d)\n", line_number, N);
exit(EXIT_FAILURE);
}
/* Read `float x` value or randomly generate if data missing */
// Move `ptr_ch` past any whitespace, then check if the string starts with `[+-]?[0-9]+`
while (isspace(ptr_ch[0])) {
ptr_ch++;
}
// If string matches `[+-]?[0-9]+.*` after preceding whitespace, parse with `strtod`
if (isdigit(ptr_ch[0]) || (((ptr_ch[0] == '+') || (ptr_ch[0] == '-')) && isdigit(ptr_ch[1]))) {
// Parse and store `x` value, then update `ptr_ch` to point to end of number
h_nbodies->x[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing `x` data to `float`");
// Check there are no further digits before the comma at `strchr(ptr_ch, ',')`
if ((strpbrk(ptr_ch, "0123456789") < strchr(ptr_ch, ',')) && (strpbrk(ptr_ch, "0123456789") != NULL)) {
fprintf(stderr, "Error reading line %u: Unexpected format when parsing `x` data to float\n", line_number);
exit(EXIT_FAILURE);
}
}
else { // Decide data missing or corrupted - means we ignore strings like ".5" and "-.2"
h_nbodies->x[body_count] = (float)rand() / RAND_MAX; // Random position in [0,1]
}
ptr_ch = strchr(ptr_ch, ',') + 1; // Update `ptr_ch` to start after the 1st comma
}
if ((strchr(ptr_ch, ',') == NULL)) { // Check for comma after second data value
fprintf(stderr, "Error reading line %u: Only 1 data delimiter (`,`) detected\n", line_number);
exit(EXIT_FAILURE);
}
else { /* Read `float y` value or randomly generate if missing */
// Move `ptr_ch` past any whitespace, then check if the string starts with `[+-]?[0-9]+`
while (isspace(ptr_ch[0])) {
ptr_ch++;
}
// If string matches `[+-]?[0-9]+.*` after preceding whitespace, parse with `strtod`
if (isdigit(ptr_ch[0]) || (((ptr_ch[0] == '+') || (ptr_ch[0] == '-')) && isdigit(ptr_ch[1]))) {
// Parse and store `y` value, then update `ptr_ch` to point to end of number
h_nbodies->y[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing `y` data to `float`");
// Check there are no further digits before the comma at `strchr(ptr_ch, ',')`
if ((strpbrk(ptr_ch, "0123456789") < strchr(ptr_ch, ',')) && (strpbrk(ptr_ch, "0123456789") != NULL)) {
fprintf(stderr, "Error reading line %u: Unexpected format when parsing `y` data to float\n", line_number);
exit(EXIT_FAILURE);
}
}
else { // Decide data missing or corrupted - means we ignore strings like ".5" and "-.2"
h_nbodies->y[body_count] = (float)rand() / RAND_MAX; // Random position in [0,1]
}
ptr_ch = strchr(ptr_ch, ',') + 1; // Update `ptr_ch` to start after 2nd comma
}
if ((strchr(ptr_ch, ',') == NULL)) { // Check for comma after third data value
fprintf(stderr, "Error reading line %u: Only 2 data delimiters (`,`) detected\n", line_number);
exit(EXIT_FAILURE);
}
else { /* Read `float vx` value or set to zero if missing */
// Move `ptr_ch` past any whitespace, then check if the string starts with `[+-]?[0-9]+`
while (isspace(ptr_ch[0])) {
ptr_ch++;
}
// If string matches `[+-]?[0-9]+.*` after preceding whitespace, parse with `strtod`
if (isdigit(ptr_ch[0]) || (((ptr_ch[0] == '+') || (ptr_ch[0] == '-')) && isdigit(ptr_ch[1]))) {
// Parse and store `vx` value, then update `ptr_ch` to point to end of number
h_nbodies->vx[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing `vx` data to `float`");
// Check there are no further digits before the comma at `strchr(ptr_ch, ',')`
if ((strpbrk(ptr_ch, "0123456789") < strchr(ptr_ch, ',')) && (strpbrk(ptr_ch, "0123456789") != NULL)) {
fprintf(stderr, "Error reading line %u: Unexpected format when parsing `vx` data to float\n", line_number);
exit(EXIT_FAILURE);
}
} // Otherwise decide data is missing or corrupted - means strings like ".5" and "-.2" are ignored
// In this case we don't change `vx` since velocity array filled with zeroes by default
ptr_ch = strchr(ptr_ch, ',') + 1; // Update `ptr_ch` to start after 3rd comma
}
if ((strchr(ptr_ch, ',') == NULL)) { // Check for comma after fourth data value
fprintf(stderr, "Error reading line %u: Only 3 data delimiters (`,`) detected\n", line_number);
exit(EXIT_FAILURE);
}
else { /* Read `float vy` value or set to zero if missing */
// Move `ptr_ch` past any whitespace, then check if the string starts with `[+-]?[0-9]+`
while (isspace(ptr_ch[0])) {
ptr_ch++;
}
// If string matches `[+-]?[0-9]+.*` after preceding whitespace, parse with `strtod`
if (isdigit(ptr_ch[0]) || (((ptr_ch[0] == '+') || (ptr_ch[0] == '-')) && isdigit(ptr_ch[1]))) {
// Parse and store `vy` value, then update `ptr_ch` to point to end of number
h_nbodies->vy[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing `vy` data to `float`");
// Check there are no further digits before the comma at `strchr(ptr_ch, ',')`
if ((strpbrk(ptr_ch, "0123456789") < strchr(ptr_ch, ',')) && (strpbrk(ptr_ch, "0123456789") != NULL)) {
fprintf(stderr, "Error reading line %u: Unexpected format when parsing `vy` data to float\n", line_number);
exit(EXIT_FAILURE);
}
} // Otherwise decide data is missing or corrupted - means strings like ".5" and "-.2" are ignored
// In this case we don't change `vy` since velocity array filled with zeroes by default
ptr_ch = strchr(ptr_ch, ',') + 1; // Update `ptr_ch` to start after 4th comma
}
if ((strchr(ptr_ch, ',') != NULL)) { // Ensure no more commas after fifth data value
fprintf(stderr, "Error reading line %u: Too many data columns detected (5 expected)\n", line_number);
exit(EXIT_FAILURE);
}
else { // Else read from after the 4th/last comma (`ptr_ch`) to the end of the line
/* Read `float m` value or set to 1/N if data missing, corrupted, or zero (no massless bodies) */
if (strtod(ptr_ch, NULL) == 0) { // If zero returned, then input data was either missing, corrupted, or zero
fprintf(stderr, "Error reading line %u: Mass data missing, corrupted, or set to zero. Replacing with default value (1/N) to avoid massless bodies\n", line_number);
// Set mass to 1/N to avoid creating massless objects (and divide-by-zero problems later)
h_nbodies->m[body_count] = (float)1 / N; // Mass distributed equally among N bodies
}
else { // Otherwise non-zero `float` value for mass read successfully, so write to `m`
// Parse and store `m` value, then update `ptr_ch` to point to end of number
h_nbodies->m[body_count] = (float)strtod(ptr_ch, &ptr_ch);
checkLastError("Error parsing mass data to `float`");
if (strpbrk(ptr_ch, "0123456789") != NULL) { // Check there are no further digits before the end of the line
fprintf(stderr, "Error reading line %u: Unexpected format when parsing mass data\n", line_number);
exit(EXIT_FAILURE);
}
}
} // One line of nbody data has been read successfully. Increment the body count.
body_count++;
// Read new line if not end of file. Thus data file can be terminated with single empty line.
}
if (body_count != N) { // Check fails when fewer than N bodies in file
fprintf(stderr, "Error: Num bodies in file (%u) does not match input N (%d)\n", body_count, N);
exit(EXIT_FAILURE);
}
fclose(f);
}
void checkLastError(const char* msg) {
if (errno != 0) {
perror(msg);
print_help();
exit(EXIT_FAILURE);
}
}
void checkCUDAError(const char* msg) {
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
dfb1f96125c12fba43f3edb5321506a885937afd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void dotproduct_precompiled_MapKernel_mult(float* skepu_output, float *a, float *b, size_t w2, size_t w3, size_t w4, size_t n, size_t base)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t gridSize = blockDim.x * gridDim.x;
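// Grid-stride loop: each thread processes elements i, i + gridSize, i + 2*gridSize, ... so the whole range [0, n) is covered regardless of launch size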
while (i < n)
{
auto res = skepu_userfunction_skepu_skel_2tmp_mult::CU(a[i], b[i]);
skepu_output[i] = res;
i += gridSize;
}
}
| dfb1f96125c12fba43f3edb5321506a885937afd.cu |
__global__ void dotproduct_precompiled_MapKernel_mult(float* skepu_output, float *a, float *b, size_t w2, size_t w3, size_t w4, size_t n, size_t base)
{
size_t i = blockIdx.x * blockDim.x + threadIdx.x;
size_t gridSize = blockDim.x * gridDim.x;
while (i < n)
{
auto res = skepu_userfunction_skepu_skel_2tmp_mult::CU(a[i], b[i]);
skepu_output[i] = res;
i += gridSize;
}
}
|
9d622e2cea7c69291887a6c08f0751509fe7752d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include "CudaDevInfo.h"
#include "cuda_sub_sqr.h"
#include "cc_sub_sqr.h"
#include "fast_heap.h"
template <class T>
T max_abs_vec_diff(size_t n_vec, //!< no of elements
const T *vec1, //!< The vector
const T *vec2 //!< The vector
) {
T mx = fabs(double(vec1[0]) - double(vec2[0]));
size_t i;
for(i=1; i<n_vec; i++) {
T df = fabs(double(vec1[i]) - double(vec2[i]));
if(df > mx)
mx = df;
}
return mx;
};
int main(int argc, const char *argv[]) {
int err = EXIT_SUCCESS;
int nv;
size_t n_vec;
const double dbl_sb = double(rand())/double(RAND_MAX);
const float flt_sb = float(dbl_sb);
size_t size_d, size_f;
size_t i;
if(argc!=2 || sscanf(argv[1],"%d", &nv) != 1 || nv<= 0) {
fprintf(stderr, "USAGE:\n"
" %s <N> (<N> > 0)\n"
"The functions creates a random number sbval and random vector vec of lenght N.\n"
"For each element vec[i] it computes (vec[i]-sbval)^2 with and without the GPU\n"
"and reports the error. This is repeated for double and float computation\n",
argv[0]);
exit(EXIT_FAILURE);
}
n_vec = size_t(nv);
switch(err) {
case hipSuccess:
break;
case hipErrorInvalidDevice:
fprintf(stderr,"h_cuda_init error: INvalid Device\n");
exit(err);
default:
fprintf(stderr, "Unexpected error in h_cuda_init: %d\n", err);
exit(err);
}
// Memory allocation
size_d = n_vec * sizeof(double);
GenericHeapElement &pdbl = fast_heap->get(size_d);
double *dbl = static_cast<double *>(*pdbl);
GenericHeapElement &ph_dbl_cpu = h_fast_heap->get(size_d);
double *h_dbl_gpu = static_cast<double *>(*ph_dbl_cpu);
GenericHeapElement &pd_dbl_gpu = d_fast_heap->get(size_d);
double *d_dbl_gpu = static_cast<double *>(*pd_dbl_gpu);
GenericHeapElement &pd_dbl_sb = d_fast_heap->get(sizeof(double));
double *d_dbl_sb = static_cast<double *>(*pd_dbl_sb);
GenericHeapElement &pdbl_cpu = fast_heap->get(size_d);
double *dbl_cpu = static_cast<double *>(*pdbl_cpu);
size_f = n_vec * sizeof(float);
GenericHeapElement &pflt = fast_heap->get(size_f);
float *flt = static_cast<float *>(*pflt);
GenericHeapElement &ph_flt_cpu = h_fast_heap->get(size_f);
float *h_flt_gpu = static_cast<float *>(*ph_flt_cpu);
GenericHeapElement &pd_flt_gpu = d_fast_heap->get(size_f);
float *d_flt_gpu = static_cast<float *>(*pd_flt_gpu);
GenericHeapElement &pd_flt_sb = d_fast_heap->get(sizeof(float));
float *d_flt_sb = static_cast<float *>(*pd_flt_sb);
GenericHeapElement &pflt_cpu = fast_heap->get(size_f);
float *flt_cpu = static_cast<float *>(*pflt_cpu);
flt = (float *) malloc(size_f);
h_flt_gpu = (float *) malloc(size_f);
gpuErrChk(hipMalloc(&d_flt_gpu, size_f),"cuda_sub_sqr_tst:CudaError", "");
gpuErrChk(hipMalloc(&d_flt_sb, sizeof(float)),"cuda_sub_sqr_tst:CudaError", "");
flt_cpu = (float *) malloc(size_f);
// Initialize random data
for(i=0; i<n_vec; i++) {
dbl[i] = double(rand())/double(RAND_MAX);
flt[i] = float(dbl[i]);
}
// Double test
gpuErrChk(hipMemcpy(d_dbl_gpu, dbl, size_d, hipMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
gpuErrChk(hipMemcpy(d_dbl_sb, &dbl_sb, sizeof(dbl_sb), hipMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
h_sub_sqr<double>(d_dbl_sb, d_dbl_gpu, n_vec, d_dbl_gpu);
gpuErrChk(hipMemcpy(h_dbl_gpu, d_dbl_gpu, size_d, hipMemcpyDeviceToHost),"cuda_sub_sqr_tst:CudaError", "");
c_sub_sqr(dbl_sb, dbl, n_vec, dbl_cpu);
printf("Max. Abs. difference in double computation using reference: %8.3g\n",
max_abs_vec_diff<double>(n_vec, dbl_cpu, h_dbl_gpu));
gpuErrChk(hipMemcpy(d_dbl_gpu, dbl, size_d, hipMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
h_sub_sqr<double>(dbl_sb, d_dbl_gpu, n_vec, d_dbl_gpu);
gpuErrChk(hipMemcpy(h_dbl_gpu, d_dbl_gpu, size_d, hipMemcpyDeviceToHost),"cuda_sub_sqr_tst:CudaError", "");
c_sub_sqr(dbl_sb, dbl, n_vec, dbl_cpu);
printf("Max. Abs. difference in double computation: using direct value %8.3g\n",
max_abs_vec_diff<double>(n_vec, dbl_cpu, h_dbl_gpu));
// float test
gpuErrChk(hipMemcpy(d_flt_gpu, flt, size_f, hipMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
gpuErrChk(hipMemcpy(d_flt_sb, &flt_sb, sizeof(flt_sb), hipMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
h_sub_sqr<float>(d_flt_sb, d_flt_gpu, n_vec, d_flt_gpu);
gpuErrChk(hipMemcpy(h_flt_gpu, d_flt_gpu, size_f, hipMemcpyDeviceToHost),"cuda_sub_sqr_tst:CudaError", "");
c_sub_sqr<float>(flt_sb, flt, n_vec, flt_cpu);
printf("Max Abs. difference in float computation using reference: %8.3g\n",
max_abs_vec_diff<float>(n_vec, flt_cpu, h_flt_gpu));
gpuErrChk(hipMemcpy(d_flt_gpu, flt, size_f, hipMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
h_sub_sqr<float>(flt_sb, d_flt_gpu, n_vec, d_flt_gpu);
gpuErrChk(hipMemcpy(h_flt_gpu, d_flt_gpu, size_f, hipMemcpyDeviceToHost),"cuda_sub_sqr_tst:CudaError", "");
c_sub_sqr<float>(flt_sb, flt, n_vec, flt_cpu);
printf("Max Abs. difference in float computation using direct value: %8.3g\n",
max_abs_vec_diff<float>(n_vec, flt_cpu, h_flt_gpu));
// Free memory
pdbl.discard(); ph_dbl_cpu.discard(); pd_dbl_gpu.discard(); pd_dbl_sb.discard(); pdbl_cpu.discard();
pflt.discard(); ph_flt_cpu.discard(); pd_flt_gpu.discard(); pd_flt_sb.discard(); pflt_cpu.discard();
return EXIT_SUCCESS;
}
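/* Hedged example invocation (the executable name is an assumption): running
       ./cuda_sub_sqr_tst 1024
   builds a 1024-element random vector, computes (vec[i]-sbval)^2 on the CPU and
   GPU in double and float precision, and prints the maximum absolute difference
   for each case. */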
| 9d622e2cea7c69291887a6c08f0751509fe7752d.cu | #include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include "CudaDevInfo.h"
#include "cuda_sub_sqr.h"
#include "cc_sub_sqr.h"
#include "fast_heap.h"
template <class T>
T max_abs_vec_diff(size_t n_vec, //!< no of elements
const T *vec1, //!< The vector
const T *vec2 //!< The vector
) {
T mx = fabs(double(vec1[0]) - double(vec2[0]));
size_t i;
for(i=1; i<n_vec; i++) {
T df = fabs(double(vec1[i]) - double(vec2[i]));
if(df > mx)
mx = df;
}
return mx;
};
int main(int argc, const char *argv[]) {
int err = EXIT_SUCCESS;
int nv;
size_t n_vec;
const double dbl_sb = double(rand())/double(RAND_MAX);
const float flt_sb = float(dbl_sb);
size_t size_d, size_f;
size_t i;
if(argc!=2 || sscanf(argv[1],"%d", &nv) != 1 || nv<= 0) {
fprintf(stderr, "USAGE:\n"
" %s <N> (<N> > 0)\n"
"The functions creates a random number sbval and random vector vec of lenght N.\n"
"For each element vec[i] it computes (vec[i]-sbval)^2 with and without the GPU\n"
"and reports the error. This is repeated for double and float computation\n",
argv[0]);
exit(EXIT_FAILURE);
}
n_vec = size_t(nv);
switch(err) {
case cudaSuccess:
break;
case cudaErrorInvalidDevice:
fprintf(stderr,"h_cuda_init error: INvalid Device\n");
exit(err);
default:
fprintf(stderr, "Unexpected error in h_cuda_init: %d\n", err);
exit(err);
}
// Memory allocation
size_d = n_vec * sizeof(double);
GenericHeapElement &pdbl = fast_heap->get(size_d);
double *dbl = static_cast<double *>(*pdbl);
GenericHeapElement &ph_dbl_cpu = h_fast_heap->get(size_d);
double *h_dbl_gpu = static_cast<double *>(*ph_dbl_cpu);
GenericHeapElement &pd_dbl_gpu = d_fast_heap->get(size_d);
double *d_dbl_gpu = static_cast<double *>(*pd_dbl_gpu);
GenericHeapElement &pd_dbl_sb = d_fast_heap->get(sizeof(double));
double *d_dbl_sb = static_cast<double *>(*pd_dbl_sb);
GenericHeapElement &pdbl_cpu = fast_heap->get(size_d);
double *dbl_cpu = static_cast<double *>(*pdbl_cpu);
size_f = n_vec * sizeof(float);
GenericHeapElement &pflt = fast_heap->get(size_f);
float *flt = static_cast<float *>(*pflt);
GenericHeapElement &ph_flt_cpu = h_fast_heap->get(size_f);
float *h_flt_gpu = static_cast<float *>(*ph_flt_cpu);
GenericHeapElement &pd_flt_gpu = d_fast_heap->get(size_f);
float *d_flt_gpu = static_cast<float *>(*pd_flt_gpu);
GenericHeapElement &pd_flt_sb = d_fast_heap->get(sizeof(float));
float *d_flt_sb = static_cast<float *>(*pd_flt_sb);
GenericHeapElement &pflt_cpu = fast_heap->get(size_f);
float *flt_cpu = static_cast<float *>(*pflt_cpu);
flt = (float *) malloc(size_f);
h_flt_gpu = (float *) malloc(size_f);
gpuErrChk(cudaMalloc(&d_flt_gpu, size_f),"cuda_sub_sqr_tst:CudaError", "");
gpuErrChk(cudaMalloc(&d_flt_sb, sizeof(float)),"cuda_sub_sqr_tst:CudaError", "");
flt_cpu = (float *) malloc(size_f);
// Initialize random data
for(i=0; i<n_vec; i++) {
dbl[i] = double(rand())/double(RAND_MAX);
flt[i] = float(dbl[i]);
}
// Double test
gpuErrChk(cudaMemcpy(d_dbl_gpu, dbl, size_d, cudaMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
gpuErrChk(cudaMemcpy(d_dbl_sb, &dbl_sb, sizeof(dbl_sb), cudaMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
h_sub_sqr<double>(d_dbl_sb, d_dbl_gpu, n_vec, d_dbl_gpu);
gpuErrChk(cudaMemcpy(h_dbl_gpu, d_dbl_gpu, size_d, cudaMemcpyDeviceToHost),"cuda_sub_sqr_tst:CudaError", "");
c_sub_sqr(dbl_sb, dbl, n_vec, dbl_cpu);
printf("Max. Abs. difference in double computation using reference: %8.3g\n",
max_abs_vec_diff<double>(n_vec, dbl_cpu, h_dbl_gpu));
gpuErrChk(cudaMemcpy(d_dbl_gpu, dbl, size_d, cudaMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
h_sub_sqr<double>(dbl_sb, d_dbl_gpu, n_vec, d_dbl_gpu);
gpuErrChk(cudaMemcpy(h_dbl_gpu, d_dbl_gpu, size_d, cudaMemcpyDeviceToHost),"cuda_sub_sqr_tst:CudaError", "");
c_sub_sqr(dbl_sb, dbl, n_vec, dbl_cpu);
printf("Max. Abs. difference in double computation: using direct value %8.3g\n",
max_abs_vec_diff<double>(n_vec, dbl_cpu, h_dbl_gpu));
// float test
gpuErrChk(cudaMemcpy(d_flt_gpu, flt, size_f, cudaMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
gpuErrChk(cudaMemcpy(d_flt_sb, &flt_sb, sizeof(flt_sb), cudaMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
h_sub_sqr<float>(d_flt_sb, d_flt_gpu, n_vec, d_flt_gpu);
gpuErrChk(cudaMemcpy(h_flt_gpu, d_flt_gpu, size_f, cudaMemcpyDeviceToHost),"cuda_sub_sqr_tst:CudaError", "");
c_sub_sqr<float>(flt_sb, flt, n_vec, flt_cpu);
printf("Max Abs. difference in float computation using reference: %8.3g\n",
max_abs_vec_diff<float>(n_vec, flt_cpu, h_flt_gpu));
gpuErrChk(cudaMemcpy(d_flt_gpu, flt, size_f, cudaMemcpyHostToDevice),"cuda_sub_sqr_tst:CudaError", "");
h_sub_sqr<float>(flt_sb, d_flt_gpu, n_vec, d_flt_gpu);
gpuErrChk(cudaMemcpy(h_flt_gpu, d_flt_gpu, size_f, cudaMemcpyDeviceToHost),"cuda_sub_sqr_tst:CudaError", "");
c_sub_sqr<float>(flt_sb, flt, n_vec, flt_cpu);
printf("Max Abs. difference in float computation using direct value: %8.3g\n",
max_abs_vec_diff<float>(n_vec, flt_cpu, h_flt_gpu));
// Free memory
pdbl.discard(); ph_dbl_cpu.discard(); pd_dbl_gpu.discard(); pd_dbl_sb.discard(); pdbl_cpu.discard();
pflt.discard(); ph_flt_cpu.discard(); pd_flt_gpu.discard(); pd_flt_sb.discard(); pflt_cpu.discard();
return EXIT_SUCCESS;
}
|
e3cf3e43d0f028ec73f341b894af8ade7dcac4e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "precomp.cuh"
#include "bitboard.h"
#include "gpuminimax.h"
namespace Checkers
{
namespace GPUMinimax
{
__device__ utility_type explore_black_frontier(GPUBitBoard board, utility_type alpha, utility_type beta, NodeType node_type, int depth, int turns)
{
GPUBitBoard frontier[32];
int frontier_size = 0;
int v = (node_type == NodeType::MAX) ? -Infinity : Infinity;
int gen_board_type;
utility_type terminal_value = 0;
if (GetBlackUtility(board, terminal_value, depth, turns))
{
return terminal_value;
}
if (node_type == NodeType::MAX)
{
gen_board_type = (GPUBitBoard::GetBlackJumps(board) != 0) ? 1 : 0;
}
else
{
gen_board_type = (GPUBitBoard::GetWhiteJumps(board) != 0) ? 1 : 0;
}
if (node_type == NodeType::MAX)
{
// if dynamic parallelism is possible, can call another kernel here
for (int i = 0; i < 32; ++i)
{
gen_black_move[gen_board_type](1u << i, board, frontier, frontier_size);
}
for (int j = 0; j < frontier_size; ++j)
{
v = GET_MAX(explore_black_frontier(frontier[j], alpha, beta, node_type + 1, depth - 1, turns - 1), v);
if (v > beta)
{
break;
}
alpha = GET_MAX(alpha, v);
}
}
else
{
// if dynamic parallelism is possible, can call another kernel here
for (int i = 0; i < 32; ++i)
{
gen_white_move[gen_board_type](1u << i, board, frontier, frontier_size);
}
for (int j = 0; j < frontier_size; ++j)
{
v = GET_MIN(explore_black_frontier(frontier[j], alpha, beta, node_type + 1, depth - 1, turns - 1), v);
if (v < alpha)
{
break;
}
beta = GET_MIN(beta, v);
}
}
return v;
}
__global__ void black_kernel(utility_type *v, GPUBitBoard const *boards, int num_boards, utility_type alpha, utility_type beta, NodeType node_type, int depth, int turns)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
__shared__ int frontier_size;
__shared__ int gen_board_type;
__shared__ GPUBitBoard frontier[32];
__shared__ utility_type t_v[32];
__shared__ bool terminated;
if (tx == 0)
{
frontier_size = 0;
utility_type terminal_value = 0;
if (terminated = GetBlackUtility(boards[bx], terminal_value, depth, turns))
{
v[bx] = terminal_value;
}
else
{
if ((node_type + 1) == NodeType::MAX)
{
gen_board_type = (GPUBitBoard::GetBlackJumps(boards[bx]) != 0) ? 1 : 0;
}
else
{
gen_board_type = (GPUBitBoard::GetWhiteJumps(boards[bx]) != 0) ? 1 : 0;
}
}
}
__syncthreads();
if (!terminated)
{
if ((node_type + 1) == NodeType::MAX)
{
gen_black_move_atomic[gen_board_type](1u << tx, boards[bx], frontier, &frontier_size);
}
else
{
gen_white_move_atomic[gen_board_type](1u << tx, boards[bx], frontier, &frontier_size);
}
__syncthreads();
if (tx < frontier_size)
{
t_v[tx] = explore_black_frontier(frontier[tx], alpha, beta, node_type + 2, depth - 1, turns - 1);
}
__syncthreads();
if ((node_type + 1) == NodeType::MAX)
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MAX(t_v[tx], t_v[tx + i]);
}
}
}
else
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MIN(t_v[tx], t_v[tx + i]);
}
}
}
__syncthreads();
if (tx == 0)
{
v[bx] = t_v[tx];
}
}
__syncthreads();
if (bx == 0)
{
if (tx < num_boards)
{
t_v[tx] = v[tx];
}
else
{
t_v[tx] = node_type == NodeType::MAX ? -Infinity : Infinity;
}
__syncthreads();
if (node_type == NodeType::MAX)
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MAX(t_v[tx], t_v[tx + i]);
}
}
}
else
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MIN(t_v[tx], t_v[tx + i]);
}
}
}
__syncthreads();
if (tx < num_boards)
{
v[tx] = t_v[tx];
}
}
__syncthreads();
}
}
} | e3cf3e43d0f028ec73f341b894af8ade7dcac4e9.cu | #include "precomp.cuh"
#include "bitboard.h"
#include "gpuminimax.h"
namespace Checkers
{
namespace GPUMinimax
{
__device__ utility_type explore_black_frontier(GPUBitBoard board, utility_type alpha, utility_type beta, NodeType node_type, int depth, int turns)
{
GPUBitBoard frontier[32];
int frontier_size = 0;
int v = (node_type == NodeType::MAX) ? -Infinity : Infinity;
int gen_board_type;
utility_type terminal_value = 0;
if (GetBlackUtility(board, terminal_value, depth, turns))
{
return terminal_value;
}
if (node_type == NodeType::MAX)
{
gen_board_type = (GPUBitBoard::GetBlackJumps(board) != 0) ? 1 : 0;
}
else
{
gen_board_type = (GPUBitBoard::GetWhiteJumps(board) != 0) ? 1 : 0;
}
if (node_type == NodeType::MAX)
{
// if dynamic parallelism is possible, can call another kernel here
for (int i = 0; i < 32; ++i)
{
gen_black_move[gen_board_type](1u << i, board, frontier, frontier_size);
}
for (int j = 0; j < frontier_size; ++j)
{
v = GET_MAX(explore_black_frontier(frontier[j], alpha, beta, node_type + 1, depth - 1, turns - 1), v);
if (v > beta)
{
break;
}
alpha = GET_MAX(alpha, v);
}
}
else
{
// if dynamic parallelism is possible, can call another kernel here
for (int i = 0; i < 32; ++i)
{
gen_white_move[gen_board_type](1u << i, board, frontier, frontier_size);
}
for (int j = 0; j < frontier_size; ++j)
{
v = GET_MIN(explore_black_frontier(frontier[j], alpha, beta, node_type + 1, depth - 1, turns - 1), v);
if (v < alpha)
{
break;
}
beta = GET_MIN(beta, v);
}
}
return v;
}
__global__ void black_kernel(utility_type *v, GPUBitBoard const *boards, int num_boards, utility_type alpha, utility_type beta, NodeType node_type, int depth, int turns)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
__shared__ int frontier_size;
__shared__ int gen_board_type;
__shared__ GPUBitBoard frontier[32];
__shared__ utility_type t_v[32];
__shared__ bool terminated;
if (tx == 0)
{
frontier_size = 0;
utility_type terminal_value = 0;
if (terminated = GetBlackUtility(boards[bx], terminal_value, depth, turns))
{
v[bx] = terminal_value;
}
else
{
if ((node_type + 1) == NodeType::MAX)
{
gen_board_type = (GPUBitBoard::GetBlackJumps(boards[bx]) != 0) ? 1 : 0;
}
else
{
gen_board_type = (GPUBitBoard::GetWhiteJumps(boards[bx]) != 0) ? 1 : 0;
}
}
}
__syncthreads();
if (!terminated)
{
if ((node_type + 1) == NodeType::MAX)
{
gen_black_move_atomic[gen_board_type](1u << tx, boards[bx], frontier, &frontier_size);
}
else
{
gen_white_move_atomic[gen_board_type](1u << tx, boards[bx], frontier, &frontier_size);
}
__syncthreads();
if (tx < frontier_size)
{
t_v[tx] = explore_black_frontier(frontier[tx], alpha, beta, node_type + 2, depth - 1, turns - 1);
}
__syncthreads();
if ((node_type + 1) == NodeType::MAX)
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MAX(t_v[tx], t_v[tx + i]);
}
}
}
else
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MIN(t_v[tx], t_v[tx + i]);
}
}
}
__syncthreads();
if (tx == 0)
{
v[bx] = t_v[tx];
}
}
__syncthreads();
if (bx == 0)
{
if (tx < num_boards)
{
t_v[tx] = v[tx];
}
else
{
t_v[tx] = node_type == NodeType::MAX ? -Infinity : Infinity;
}
__syncthreads();
if (node_type == NodeType::MAX)
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MAX(t_v[tx], t_v[tx + i]);
}
}
}
else
{
for (int i = 1; i < 32; i *= 2)
{
if (tx + i < 32)
{
t_v[tx] = GET_MIN(t_v[tx], t_v[tx + i]);
}
}
}
__syncthreads();
if (tx < num_boards)
{
v[tx] = t_v[tx];
}
}
__syncthreads();
}
}
} |
b744fb1cc035247bffa99fddac7e506e7bc74f86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Operations on GDF column validity bitmasks
*
* @file column.cpp
* ---------------------------------------------------------------------------**/
#include <vector>
#include <cassert>
#include <hipcub/hipcub.hpp>
#include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/error_utils.h"
#include "utilities/cudf_utils.h"
#include <thrust/tabulate.h>
// To account for if gdf_valid_type is not a 4 byte type,
// compute the RATIO of the number of bytes in gdf_valid_type
// to the 4 byte type being used for casting
using valid32_t = uint32_t;
constexpr size_t RATIO = sizeof(valid32_t) / sizeof(gdf_valid_type);
constexpr int BITS_PER_MASK32 = GDF_VALID_BITSIZE * RATIO;
constexpr int block_size = 256;
/** --------------------------------------------------------------------------*
* @Synopsis Counts the number of valid bits for the specified number of rows
* in the host vector of gdf_valid_type masks
*
* @Param masks The host vector of masks whose bits will be counted
* @Param num_rows The number of bits to count
*
* @Returns The number of valid bits in [0, num_rows) in the host vector of masks
* ----------------------------------------------------------------------------*/
size_t count_valid_bits_host(std::vector<gdf_valid_type> const & masks, int const num_rows)
{
if((0 == num_rows) || (0 == masks.size())){
return 0;
}
size_t count{0};
// Count the valid bits for all masks except the last one
for(size_t i = 0; i < (masks.size() - 1); ++i)
{
gdf_valid_type current_mask = masks[i];
while(current_mask > 0)
{
current_mask &= (current_mask-1) ;
count++;
}
}
// Only count the bits in the last mask that correspond to rows
int num_rows_last_mask = num_rows % GDF_VALID_BITSIZE;
if(num_rows_last_mask == 0)
num_rows_last_mask = GDF_VALID_BITSIZE;
gdf_valid_type last_mask = *(masks.end() - 1);
for(int i = 0; (i < num_rows_last_mask) && (last_mask > 0); ++i)
{
count += (last_mask & gdf_valid_type(1));
last_mask >>= 1;
}
return count;
}
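/* Worked example (hedged; assumes GDF_VALID_BITSIZE == 8 and is not part of the
   original source): for masks = {0xFF, 0x03} and num_rows = 10, the first loop
   counts all 8 set bits of masks[0], num_rows_last_mask becomes 10 % 8 == 2,
   and the low 2 bits of masks[1] contribute 2 more, so the function returns 10. */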
/* --------------------------------------------------------------------------*/
/**
* @brief Kernel to count the number of set bits in a column's validity buffer
*
* The underlying buffer type may only be a 1B type, but it is casted to a 4B
* type (valid32_t) such that __popc may be used to more efficiently count the
* number of set bits. This requires handling the last 4B element as a special
* case as the buffer may not be a multiple of 4 bytes.
*
* @Param[in] masks32 Pointer to buffer (casted as a 4B type) whose bits will be counted
* @Param[in] num_masks32 The number of 4B elements in the buffer
* @Param[in] num_rows The number of rows in the column, i.e., the number of bits
* in the buffer that correspond to rows
* @Param[out] global_count The number of set bits in the range of bits [0, num_rows)
*/
/* ----------------------------------------------------------------------------*/
template <typename size_type>
__global__
void count_valid_bits(valid32_t const * const masks32,
int const num_masks32,
int const num_rows,
size_type * const global_count)
{
using BlockReduce = hipcub::BlockReduce<size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// If the number of rows is not a multiple of 32, then the remaining
  // rows need to be handled separately because not all of its bits correspond
// to rows
int last_mask32{0};
int const num_rows_last_mask{num_rows % BITS_PER_MASK32};
if(0 == num_rows_last_mask)
last_mask32 = num_masks32;
else
last_mask32 = num_masks32 - 1;
int const idx{static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x)};
int cur_mask{idx};
size_type my_count{0};
// Use popc to count the valid bits for the all of the masks
// where all of the bits correspond to rows
while(cur_mask < last_mask32)
{
my_count += __popc(masks32[cur_mask]);
cur_mask += blockDim.x * gridDim.x;
}
// Handle the remainder rows
if(idx < num_rows_last_mask)
{
gdf_valid_type const * const valids{reinterpret_cast<gdf_valid_type const *>(masks32)};
int const my_row{num_rows - idx - 1};
if(true == gdf_is_valid(valids,my_row))
++my_count;
}
// Reduces the count from each thread in a block into a block count
int const block_count{BlockReduce(temp_storage).Sum(my_count)};
// Store the block count into the global count
if(threadIdx.x == 0)
{
atomicAdd(global_count, block_count);
}
}
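/* Worked example (hedged; assumes a 1-byte gdf_valid_type with
   GDF_VALID_BITSIZE == 8, so BITS_PER_MASK32 == 32): for num_rows = 70 the
   bitmask occupies ceil(70 / 8) = 9 bytes, i.e. num_masks32 = 3 words of 4
   bytes. Since 70 % 32 == 6, only the first 2 words are counted with __popc;
   rows 64..69 are then checked one bit at a time through gdf_is_valid, so the
   kernel never reads the 3 bytes beyond the end of the 9-byte buffer. */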
/* ---------------------------------------------------------------------------*
* @Synopsis Counts the number of valid bits for the specified number of rows
* in a validity bitmask.
*
* @Param[in] masks The validity bitmask buffer in device memory
* @Param[in] num_rows The number of bits to count
* @Param[out] count The number of valid bits in the buffer from [0, num_rows)
*
* @Returns GDF_SUCCESS upon successful completion
*
* ----------------------------------------------------------------------------*/
gdf_error gdf_count_nonzero_mask(gdf_valid_type const * masks, int num_rows, int * count)
{
if((nullptr == masks) || (nullptr == count)){return GDF_DATASET_EMPTY;}
if(0 == num_rows) {return GDF_SUCCESS;}
  // Masks will be processed as 4B types, therefore we require that the underlying
// type be less than or equal to 4B
assert(sizeof(valid32_t) >= sizeof(gdf_valid_type));
// Number of gdf_valid_types in the validity bitmask
size_t const num_masks{gdf_get_num_chars_bitmask(num_rows)};
// Number of 4 byte types in the validity bit mask
size_t num_masks32{static_cast<size_t>(::ceil(static_cast<float>(num_masks) / RATIO))};
int h_count{0};
if(num_masks32 > 0)
{
// TODO: Probably shouldn't create/destroy the stream every time
hipStream_t count_stream;
CUDA_TRY(hipStreamCreate(&count_stream));
int * d_count{nullptr};
// Cast validity buffer to 4 byte type
valid32_t const * masks32{reinterpret_cast<valid32_t const *>(masks)};
RMM_TRY(RMM_ALLOC((void**)&d_count, sizeof(int), count_stream));
CUDA_TRY(hipMemsetAsync(d_count, 0, sizeof(int), count_stream));
size_t const grid_size{(num_masks32 + block_size - 1)/block_size};
hipLaunchKernelGGL(( count_valid_bits), dim3(grid_size), dim3(block_size),0,count_stream, masks32, num_masks32, num_rows, d_count);
CUDA_TRY( hipGetLastError() );
CUDA_TRY(hipMemcpyAsync(&h_count, d_count, sizeof(int), hipMemcpyDeviceToHost, count_stream));
RMM_TRY(RMM_FREE(d_count, count_stream));
CUDA_TRY(hipStreamSynchronize(count_stream));
CUDA_TRY(hipStreamDestroy(count_stream));
}
assert(h_count >= 0);
assert(h_count <= num_rows);
*count = h_count;
return GDF_SUCCESS;
}
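/* Hedged usage sketch (variable names are assumptions): given a device-resident
   bitmask d_valid covering `rows` rows,
       int n_valid = 0;
       gdf_error e = gdf_count_nonzero_mask(d_valid, rows, &n_valid);
   leaves the number of set (non-null) rows in n_valid when e == GDF_SUCCESS. */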
/** ---------------------------------------------------------------------------*
* @brief Concatenate the validity bitmasks of multiple columns
*
* Accounts for the differences between lengths of columns and their bitmasks
* (e.g. because gdf_valid_type is larger than one bit).
*
* @param[out] output_mask The concatenated mask
* @param[in] output_column_length The total length (in data elements) of the
* concatenated column
* @param[in] masks_to_concat The array of device pointers to validity bitmasks
* for the columns to concatenate
* @param[in] column_lengths An array of lengths of the columns to concatenate
* @param[in] num_columns The number of columns to concatenate
* @return gdf_error GDF_SUCCESS or GDF_CUDA_ERROR if there is a runtime CUDA
error
* ---------------------------------------------------------------------------**/
gdf_error gdf_mask_concat(gdf_valid_type *output_mask,
gdf_size_type output_column_length,
gdf_valid_type *masks_to_concat[],
gdf_size_type *column_lengths,
gdf_size_type num_columns)
{
// This lambda is executed in a thrust algorithm. Each thread computes and
// returns one gdf_valid_type element for the concatenated output mask
auto mask_concatenator = [=] __device__ (gdf_size_type mask_index) {
gdf_valid_type output_m = 0;
int cur_mask_index = 0, cur_mask_start = 0;
int cur_mask_len = column_lengths[0];
// Each thread processes one GDF_VALID_BITSIZE worth of valid bits
for (int bit = 0; bit < GDF_VALID_BITSIZE; ++bit)
{
gdf_size_type output_index = mask_index * GDF_VALID_BITSIZE + bit;
// stop when we are beyond the length of the output column (in elements)
if (output_index >= output_column_length) break;
// find the next column's mask when we step past the current column's length
while ( (cur_mask_start + cur_mask_len <= output_index) && (cur_mask_index < num_columns - 1) )
{
cur_mask_start += cur_mask_len;
cur_mask_len = column_lengths[++cur_mask_index];
}
// Set each valid bit at the right location in this thread's output gdf_valid_type
// Note: gdf_is_valid returns true when the input mask is a null pointer
// This makes it behave as if columns with null validity masks have masks of all 1s,
// which is the desired behavior.
gdf_size_type index = output_index - cur_mask_start;
if ( gdf_is_valid(masks_to_concat[cur_mask_index], index) )
{
output_m |= (1 << bit);
}
}
return output_m;
};
// This is like thrust::for_each where the lambda gets the current index into the output array
// as input
thrust::tabulate(thrust::hip::par,
output_mask,
output_mask + gdf_get_num_chars_bitmask(output_column_length),
mask_concatenator);
CUDA_TRY( hipGetLastError() );
return GDF_SUCCESS;
}
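/* Worked example (hedged; assumes GDF_VALID_BITSIZE == 8): concatenating two
   columns with lengths {3, 5} gives an 8-row output, so output_mask[0] takes
   bits 0..2 from the first column's mask and bits 3..7 from bits 0..4 of the
   second column's mask; a column whose mask pointer is null contributes all-1
   bits because gdf_is_valid treats a null mask as fully valid. */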
| b744fb1cc035247bffa99fddac7e506e7bc74f86.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** ---------------------------------------------------------------------------*
* @brief Operations on GDF column validity bitmasks
*
* @file column.cpp
* ---------------------------------------------------------------------------**/
#include <vector>
#include <cassert>
#include <cub/cub.cuh>
#include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/error_utils.h"
#include "utilities/cudf_utils.h"
#include <thrust/tabulate.h>
// To account for if gdf_valid_type is not a 4 byte type,
// compute the RATIO of the number of bytes in gdf_valid_type
// to the 4 byte type being used for casting
using valid32_t = uint32_t;
constexpr size_t RATIO = sizeof(valid32_t) / sizeof(gdf_valid_type);
constexpr int BITS_PER_MASK32 = GDF_VALID_BITSIZE * RATIO;
constexpr int block_size = 256;
/** --------------------------------------------------------------------------*
* @Synopsis Counts the number of valid bits for the specified number of rows
* in the host vector of gdf_valid_type masks
*
* @Param masks The host vector of masks whose bits will be counted
* @Param num_rows The number of bits to count
*
* @Returns The number of valid bits in [0, num_rows) in the host vector of masks
* ----------------------------------------------------------------------------*/
size_t count_valid_bits_host(std::vector<gdf_valid_type> const & masks, int const num_rows)
{
if((0 == num_rows) || (0 == masks.size())){
return 0;
}
size_t count{0};
// Count the valid bits for all masks except the last one
for(size_t i = 0; i < (masks.size() - 1); ++i)
{
gdf_valid_type current_mask = masks[i];
while(current_mask > 0)
{
current_mask &= (current_mask-1) ;
count++;
}
}
// Only count the bits in the last mask that correspond to rows
int num_rows_last_mask = num_rows % GDF_VALID_BITSIZE;
if(num_rows_last_mask == 0)
num_rows_last_mask = GDF_VALID_BITSIZE;
gdf_valid_type last_mask = *(masks.end() - 1);
for(int i = 0; (i < num_rows_last_mask) && (last_mask > 0); ++i)
{
count += (last_mask & gdf_valid_type(1));
last_mask >>= 1;
}
return count;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Kernel to count the number of set bits in a column's validity buffer
*
* The underlying buffer type may only be a 1B type, but it is casted to a 4B
* type (valid32_t) such that __popc may be used to more efficiently count the
* number of set bits. This requires handling the last 4B element as a special
* case as the buffer may not be a multiple of 4 bytes.
*
* @Param[in] masks32 Pointer to buffer (casted as a 4B type) whose bits will be counted
* @Param[in] num_masks32 The number of 4B elements in the buffer
* @Param[in] num_rows The number of rows in the column, i.e., the number of bits
* in the buffer that correspond to rows
* @Param[out] global_count The number of set bits in the range of bits [0, num_rows)
*/
/* ----------------------------------------------------------------------------*/
template <typename size_type>
__global__
void count_valid_bits(valid32_t const * const masks32,
int const num_masks32,
int const num_rows,
size_type * const global_count)
{
using BlockReduce = cub::BlockReduce<size_type, block_size>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// If the number of rows is not a multiple of 32, then the remaining
  // rows need to be handled separately because not all of its bits correspond
// to rows
int last_mask32{0};
int const num_rows_last_mask{num_rows % BITS_PER_MASK32};
if(0 == num_rows_last_mask)
last_mask32 = num_masks32;
else
last_mask32 = num_masks32 - 1;
int const idx{static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x)};
int cur_mask{idx};
size_type my_count{0};
// Use popc to count the valid bits for the all of the masks
// where all of the bits correspond to rows
while(cur_mask < last_mask32)
{
my_count += __popc(masks32[cur_mask]);
cur_mask += blockDim.x * gridDim.x;
}
// Handle the remainder rows
if(idx < num_rows_last_mask)
{
gdf_valid_type const * const valids{reinterpret_cast<gdf_valid_type const *>(masks32)};
int const my_row{num_rows - idx - 1};
if(true == gdf_is_valid(valids,my_row))
++my_count;
}
// Reduces the count from each thread in a block into a block count
int const block_count{BlockReduce(temp_storage).Sum(my_count)};
// Store the block count into the global count
if(threadIdx.x == 0)
{
atomicAdd(global_count, block_count);
}
}
/* ---------------------------------------------------------------------------*
* @Synopsis Counts the number of valid bits for the specified number of rows
* in a validity bitmask.
*
* @Param[in] masks The validity bitmask buffer in device memory
* @Param[in] num_rows The number of bits to count
* @Param[out] count The number of valid bits in the buffer from [0, num_rows)
*
* @Returns GDF_SUCCESS upon successful completion
*
* ----------------------------------------------------------------------------*/
gdf_error gdf_count_nonzero_mask(gdf_valid_type const * masks, int num_rows, int * count)
{
if((nullptr == masks) || (nullptr == count)){return GDF_DATASET_EMPTY;}
if(0 == num_rows) {return GDF_SUCCESS;}
  // Masks will be processed as 4B types, therefore we require that the underlying
// type be less than or equal to 4B
assert(sizeof(valid32_t) >= sizeof(gdf_valid_type));
// Number of gdf_valid_types in the validity bitmask
size_t const num_masks{gdf_get_num_chars_bitmask(num_rows)};
// Number of 4 byte types in the validity bit mask
size_t num_masks32{static_cast<size_t>(std::ceil(static_cast<float>(num_masks) / RATIO))};
int h_count{0};
if(num_masks32 > 0)
{
// TODO: Probably shouldn't create/destroy the stream every time
cudaStream_t count_stream;
CUDA_TRY(cudaStreamCreate(&count_stream));
int * d_count{nullptr};
// Cast validity buffer to 4 byte type
valid32_t const * masks32{reinterpret_cast<valid32_t const *>(masks)};
RMM_TRY(RMM_ALLOC((void**)&d_count, sizeof(int), count_stream));
CUDA_TRY(cudaMemsetAsync(d_count, 0, sizeof(int), count_stream));
size_t const grid_size{(num_masks32 + block_size - 1)/block_size};
count_valid_bits<<<grid_size, block_size,0,count_stream>>>(masks32, num_masks32, num_rows, d_count);
CUDA_TRY( cudaGetLastError() );
CUDA_TRY(cudaMemcpyAsync(&h_count, d_count, sizeof(int), cudaMemcpyDeviceToHost, count_stream));
RMM_TRY(RMM_FREE(d_count, count_stream));
CUDA_TRY(cudaStreamSynchronize(count_stream));
CUDA_TRY(cudaStreamDestroy(count_stream));
}
assert(h_count >= 0);
assert(h_count <= num_rows);
*count = h_count;
return GDF_SUCCESS;
}
/** ---------------------------------------------------------------------------*
* @brief Concatenate the validity bitmasks of multiple columns
*
* Accounts for the differences between lengths of columns and their bitmasks
* (e.g. because gdf_valid_type is larger than one bit).
*
* @param[out] output_mask The concatenated mask
* @param[in] output_column_length The total length (in data elements) of the
* concatenated column
* @param[in] masks_to_concat The array of device pointers to validity bitmasks
* for the columns to concatenate
* @param[in] column_lengths An array of lengths of the columns to concatenate
* @param[in] num_columns The number of columns to concatenate
* @return gdf_error GDF_SUCCESS or GDF_CUDA_ERROR if there is a runtime CUDA
error
* ---------------------------------------------------------------------------**/
gdf_error gdf_mask_concat(gdf_valid_type *output_mask,
gdf_size_type output_column_length,
gdf_valid_type *masks_to_concat[],
gdf_size_type *column_lengths,
gdf_size_type num_columns)
{
// This lambda is executed in a thrust algorithm. Each thread computes and
// returns one gdf_valid_type element for the concatenated output mask
auto mask_concatenator = [=] __device__ (gdf_size_type mask_index) {
gdf_valid_type output_m = 0;
int cur_mask_index = 0, cur_mask_start = 0;
int cur_mask_len = column_lengths[0];
// Each thread processes one GDF_VALID_BITSIZE worth of valid bits
for (int bit = 0; bit < GDF_VALID_BITSIZE; ++bit)
{
gdf_size_type output_index = mask_index * GDF_VALID_BITSIZE + bit;
// stop when we are beyond the length of the output column (in elements)
if (output_index >= output_column_length) break;
// find the next column's mask when we step past the current column's length
while ( (cur_mask_start + cur_mask_len <= output_index) && (cur_mask_index < num_columns - 1) )
{
cur_mask_start += cur_mask_len;
cur_mask_len = column_lengths[++cur_mask_index];
}
// Set each valid bit at the right location in this thread's output gdf_valid_type
// Note: gdf_is_valid returns true when the input mask is a null pointer
// This makes it behave as if columns with null validity masks have masks of all 1s,
// which is the desired behavior.
gdf_size_type index = output_index - cur_mask_start;
if ( gdf_is_valid(masks_to_concat[cur_mask_index], index) )
{
output_m |= (1 << bit);
}
}
return output_m;
};
// This is like thrust::for_each where the lambda gets the current index into the output array
// as input
thrust::tabulate(thrust::cuda::par,
output_mask,
output_mask + gdf_get_num_chars_bitmask(output_column_length),
mask_concatenator);
CUDA_TRY( cudaGetLastError() );
return GDF_SUCCESS;
}
|
a35662f6791a34df6c8ab5d2f8c3eed3ed34a5b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Academic License - for use in teaching, academic research, and meeting
// course requirements at degree granting institutions only. Not for
// government, commercial, or other organizational use.
// File: fog_rectification.cu
//
// GPU Coder version : 1.0
// CUDA/C/C++ source code generated on : 25-Jan-2018 08:58:04
//
// Include Files
#include "rt_nonfinite.h"
#include "fog_rectification.h"
// Variable Definitions
__constant__ real_T const_b[9];
// Function Declarations
static __global__ void fog_rectification_kernel1(const uint8_T *input, real_T
*b_input);
static __global__ void fog_rectification_kernel10(real_T *restoreOut, uint8_T
*b_restoreOut);
static __global__ void fog_rectification_kernel11(const real_T *b, uint8_T
*restoreOut, uint8_T *im_gray);
static __global__ void fog_rectification_kernel12(real_T *localBins3, real_T
*localBins2, real_T *localBins1, real_T *cdf);
static __global__ void fog_rectification_kernel13(real_T *localBins3, real_T
*localBins2, real_T *localBins1, real_T *cdf);
static __global__ void fog_rectification_kernel14(int32_T *y_size, int32_T
*b_y_size, int32_T *ii_size, int32_T *T_size);
static __global__ void fog_rectification_kernel15(int32_T i, real_T *T_data);
static __global__ void fog_rectification_kernel16(uint8_T *restoreOut);
static __global__ void fog_rectification_kernel17(real_T *T_data, uint8_T
*restoreOut, uint8_T *out);
static __global__ void fog_rectification_kernel18(real_T *T_data, uint8_T
*restoreOut, uint8_T *out);
static __global__ void fog_rectification_kernel19(real_T *T_data, uint8_T
*restoreOut, uint8_T *out);
static __global__ void fog_rectification_kernel2(real_T *input, real_T
*darkChannel);
static __global__ void fog_rectification_kernel3(real_T *darkChannel, real_T
*diff_im);
static __global__ void fog_rectification_kernel4(real_T *expanded);
static __global__ void fog_rectification_kernel5(real_T *diff_im, real_T
*expanded);
static __global__ void fog_rectification_kernel6(real_T *expanded, real_T
*diff_im);
static __global__ void fog_rectification_kernel7(real_T *diff_im, real_T *y);
static __global__ void fog_rectification_kernel8(real_T *y, real_T *diff_im,
real_T *darkChannel);
static __global__ void fog_rectification_kernel9(real_T *darkChannel, real_T
*diff_im, real_T *input, real_T *restoreOut);
static __device__ real_T rt_roundd_snf(real_T u);
// Function Definitions
//
// Arguments : uint3 blockArg
// uint3 gridArg
// const uint8_T *input
// real_T *b_input
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel1(const
uint8_T *input, real_T *b_input)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 921600)) {
// restoreOut is used to store the output of restoration
// Changing the precision level of input image to double
b_input[j] = (real_T)input[j] / 255.0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *restoreOut
// uint8_T *b_restoreOut
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel10
(real_T *restoreOut, uint8_T *b_restoreOut)
{
real_T cv;
int32_T j;
uint8_T u0;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 921600)) {
cv = rt_roundd_snf(255.0 * restoreOut[j]);
if (cv < 256.0) {
if (cv >= 0.0) {
u0 = (uint8_T)cv;
} else {
u0 = 0;
}
} else if (cv >= 256.0) {
u0 = MAX_uint8_T;
} else {
u0 = 0;
}
b_restoreOut[j] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// const real_T *b
// uint8_T *restoreOut
// uint8_T *im_gray
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel11(
const real_T *b, uint8_T *restoreOut, uint8_T *im_gray)
{
uint8_T a[3];
int32_T j;
real_T cv;
int32_T n;
uint8_T u0;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 307200)) {
    // %%%%%% stretching performs the histogram stretching of the image %%%%%%%
    // %%%%%%%% im is the input color image and p is cdf limit
    // %%%%% out is the contrast stretched image and cdf is the cumulative prob
    // %%%%% density function and T is the stretching function
// rgbtograyconversion
a[0] = restoreOut[j];
a[1] = restoreOut[j + 307200];
a[2] = restoreOut[j + 614400];
cv = 0.0;
for (n = 0; n < 3; n++) {
cv += (real_T)a[n] * b[n];
}
cv = rt_roundd_snf(cv);
if (cv < 256.0) {
u0 = (uint8_T)cv;
} else {
u0 = MAX_uint8_T;
}
im_gray[j] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *localBins3
// real_T *localBins2
// real_T *localBins1
// real_T *cdf
// Return Type : void
//
static __global__ __launch_bounds__(256, 1) void fog_rectification_kernel12
(real_T *localBins3, real_T *localBins2, real_T *localBins1, real_T *cdf)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 256)) {
// histogram calculation
cdf[j] = 0.0;
localBins1[j] = 0.0;
localBins2[j] = 0.0;
localBins3[j] = 0.0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *localBins3
// real_T *localBins2
// real_T *localBins1
// real_T *cdf
// Return Type : void
//
static __global__ __launch_bounds__(256, 1) void fog_rectification_kernel13
(real_T *localBins3, real_T *localBins2, real_T *localBins1, real_T *cdf)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 256)) {
// cumulative Sum calculation
cdf[j] = ((cdf[j] + localBins1[j]) + localBins2[j]) + localBins3[j];
cdf[j] /= 307200.0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// int32_T *y_size
// int32_T *b_y_size
// int32_T *ii_size
// int32_T *T_size
// Return Type : void
//
static __global__ __launch_bounds__(32, 1) void fog_rectification_kernel14
(int32_T *y_size, int32_T *b_y_size, int32_T *ii_size, int32_T *T_size)
{
;
;
if (!(int32_T)((int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x *
blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) +
threadIdx.x) >= 1)) {
T_size[0] = 1;
T_size[1] = ((ii_size[0] + b_y_size[1]) + y_size[1]) + 1;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// int32_T i
// real_T *T_data
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel15
(int32_T i, real_T *T_data)
{
int32_T n;
;
;
n = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if ((!(int32_T)(n >= 768)) && ((int32_T)(1 + n <= i))) {
T_data[n] = floor(T_data[n]);
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// uint8_T *restoreOut
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel16
(uint8_T *restoreOut)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if ((!(int32_T)(j >= 921600)) && ((int32_T)((int32_T)restoreOut[j] == 0))) {
// Replacing the value from look up table
restoreOut[j] = 1;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *T_data
// uint8_T *restoreOut
// uint8_T *out
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel17
(real_T *T_data, uint8_T *restoreOut, uint8_T *out)
{
uint32_T threadId;
real_T cv;
int32_T j;
int32_T i0;
uint8_T u0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
cv = rt_roundd_snf(T_data[(int32_T)restoreOut[j + 480 * i0] - 1]);
if (cv < 256.0) {
if (cv >= 0.0) {
u0 = (uint8_T)cv;
} else {
u0 = 0;
}
} else if (cv >= 256.0) {
u0 = MAX_uint8_T;
} else {
u0 = 0;
}
out[j + 480 * i0] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *T_data
// uint8_T *restoreOut
// uint8_T *out
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel18
(real_T *T_data, uint8_T *restoreOut, uint8_T *out)
{
uint32_T threadId;
real_T cv;
int32_T j;
int32_T i0;
uint8_T u0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
cv = rt_roundd_snf(T_data[(int32_T)restoreOut[307200 + (j + 480 * i0)] - 1]);
if (cv < 256.0) {
if (cv >= 0.0) {
u0 = (uint8_T)cv;
} else {
u0 = 0;
}
} else if (cv >= 256.0) {
u0 = MAX_uint8_T;
} else {
u0 = 0;
}
out[307200 + (j + 480 * i0)] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *T_data
// uint8_T *restoreOut
// uint8_T *out
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel19
(real_T *T_data, uint8_T *restoreOut, uint8_T *out)
{
uint32_T threadId;
real_T cv;
int32_T j;
int32_T i0;
uint8_T u0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
cv = rt_roundd_snf(T_data[(int32_T)restoreOut[614400 + (j + 480 * i0)] - 1]);
if (cv < 256.0) {
if (cv >= 0.0) {
u0 = (uint8_T)cv;
} else {
u0 = 0;
}
} else if (cv >= 256.0) {
u0 = MAX_uint8_T;
} else {
u0 = 0;
}
out[614400 + (j + 480 * i0)] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *input
// real_T *darkChannel
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel2
(real_T *input, real_T *darkChannel)
{
real_T cv;
int32_T j;
int32_T n;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 307200)) {
// Dark channel Estimation from input
cv = input[j];
for (n = j + 307201; n <= j + 614401; n += 307200) {
if (input[n - 1] < cv) {
cv = input[n - 1];
}
}
darkChannel[j] = cv;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *darkChannel
// real_T *diff_im
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel3
(real_T *darkChannel, real_T *diff_im)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 307200)) {
// diff_im is used as input and output variable for anisotropic diffusion
diff_im[j] = 0.9 * darkChannel[j];
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *expanded
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel4
(real_T *expanded)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 309444)) {
expanded[j] = 0.0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *diff_im
// real_T *expanded
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel5
(real_T *diff_im, real_T *expanded)
{
uint32_T threadId;
int32_T j;
int32_T i0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
expanded[(j + 482 * (1 + i0)) + 1] = diff_im[j + 480 * i0];
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *expanded
// real_T *diff_im
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void fog_rectification_kernel6
(real_T *expanded, real_T *diff_im)
{
real_T cv;
int32_T n;
int32_T j;
int32_T threadIdY;
int32_T threadIdX;
__shared__ real_T expanded_shared[1156];
int32_T baseR;
int32_T srow;
int32_T strideRow;
int32_T scol;
int32_T strideCol;
int32_T y_idx;
int32_T baseC;
int32_T x_idx;
;
;
threadIdY = (int32_T)(blockDim.y * blockIdx.y + threadIdx.y);
threadIdX = (int32_T)(blockDim.x * blockIdx.x + threadIdx.x);
baseR = threadIdX;
srow = (int32_T)threadIdx.x;
strideRow = (int32_T)blockDim.x;
scol = (int32_T)threadIdx.y;
strideCol = (int32_T)blockDim.y;
for (y_idx = srow; y_idx <= 33; y_idx += strideRow) {
baseC = threadIdY;
for (x_idx = scol; x_idx <= 33; x_idx += strideCol) {
if (((int32_T)(((int32_T)(baseR >= 0)) && ((int32_T)(baseR < 482)))) &&
((int32_T)(((int32_T)(baseC >= 0)) && ((int32_T)(baseC < 642))))) {
expanded_shared[y_idx + 34 * x_idx] = (real_T)expanded[482 * baseC +
baseR];
} else {
expanded_shared[y_idx + 34 * x_idx] = 0.0;
}
baseC += strideCol;
}
baseR += strideRow;
}
__syncthreads();
if ((!(int32_T)(threadIdX >= 480)) && (!(int32_T)(threadIdY >= 640))) {
cv = 0.0;
for (n = 0; n < 3; n++) {
for (j = 0; j < 3; j++) {
cv += expanded_shared[((int32_T)threadIdx.x + ((j + threadIdX) -
threadIdX)) + 34 * ((int32_T)threadIdx.y + ((n + threadIdY) -
threadIdY))] * const_b[(3 * (2 - n) - j) + 2];
}
}
diff_im[threadIdX + 480 * threadIdY] = cv;
}
}
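// The kernel above performs the 3x3 convolution used for the anisotropic
// diffusion refinement as a tiled stencil: each 32x32 thread block stages a
// 34x34 halo tile of `expanded` into shared memory, then every thread forms
// one output pixel as a weighted sum with the constant-memory mask `const_b`.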
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *diff_im
// real_T *y
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel7
(real_T *diff_im, real_T *y)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 307200)) {
// Reduction with min
y[j] = diff_im[j];
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *y
// real_T *diff_im
// real_T *darkChannel
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel8
(real_T *y, real_T *diff_im, real_T *darkChannel)
{
real_T u1;
int32_T n;
;
;
n = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(n >= 307200)) {
// Parallel element-wise math to compute
// Restoration with inverse Koschmieder's law
u1 = y[n];
if (darkChannel[n] < y[n]) {
u1 = darkChannel[n];
}
diff_im[n] = u1;
diff_im[n] *= 0.6;
darkChannel[n] = 1.0 / (1.0 - diff_im[n]);
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *darkChannel
// real_T *diff_im
// real_T *input
// real_T *restoreOut
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel9
(real_T *darkChannel, real_T *diff_im, real_T *input, real_T *restoreOut)
{
uint32_T threadId;
int32_T j;
int32_T i0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
restoreOut[j + 480 * i0] = (input[j + 480 * i0] - diff_im[j + 480 * i0]) *
darkChannel[j + 480 * i0];
restoreOut[307200 + (j + 480 * i0)] = (input[307200 + (j + 480 * i0)] -
diff_im[j + 480 * i0]) * darkChannel[j + 480 * i0];
restoreOut[614400 + (j + 480 * i0)] = (input[614400 + (j + 480 * i0)] -
diff_im[j + 480 * i0]) * darkChannel[j + 480 * i0];
}
}
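// Taken together, the two kernels above invert Koschmieder's law with the
// atmospheric light normalized to 1: t = 0.6 * min(diffused dark channel,
// original dark channel), and each color channel is restored as
// (input - t) / (1 - t).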
//
// Arguments : real_T u
// Return Type : real_T
//
static __device__ real_T rt_roundd_snf(real_T u)
{
real_T y;
if (fabs(u) < 4.503599627370496E+15) {
if (u >= 0.5) {
y = floor(u + 0.5);
} else if (u > -0.5) {
y = u * 0.0;
} else {
y = ceil(u - 0.5);
}
} else {
y = u;
}
return y;
}
//
// Copyright 2017 The MathWorks, Inc.
// Arguments : const uint8_T input[921600]
// uint8_T out[921600]
// Return Type : void
//
void fog_rectification(const uint8_T input[921600], uint8_T out[921600])
{
int32_T idx;
int32_T i0;
int32_T i;
static const real_T b[9] = { 0.0625, 0.125, 0.0625, 0.125, 0.25, 0.125, 0.0625,
0.125, 0.0625 };
static uint8_T im_gray[307200];
static const real_T b_b[3] = { 0.29893602129377539, 0.58704307445112125,
0.11402090425510336 };
real_T cdf[256];
real_T localBins1[256];
real_T localBins2[256];
real_T localBins3[256];
int32_T ii_size[1];
int32_T varargin_1;
int32_T b_ii_size[1];
real_T y;
real_T b_y;
real_T y_data[255];
int32_T y_size[2];
int32_T ndbl;
int16_T i1;
int16_T i2;
real_T c_y;
int32_T absb;
int32_T u0;
uint32_T u1;
int32_T b_y_size[2];
real_T d_y;
real_T e_y;
int32_T T_size[2];
real_T T_data[771];
uint8_T *gpu_input;
real_T *b_gpu_input;
real_T *gpu_darkChannel;
real_T *gpu_diff_im;
real_T *gpu_expanded;
real_T *gpu_y;
real_T *gpu_restoreOut;
uint8_T *b_gpu_restoreOut;
real_T *gpu_b;
uint8_T *gpu_im_gray;
real_T *gpu_localBins3;
real_T *gpu_localBins2;
real_T *gpu_localBins1;
real_T *gpu_cdf;
int32_T *gpu_y_size;
int32_T *b_gpu_y_size;
int32_T *gpu_ii_size;
int32_T *gpu_T_size;
real_T *gpu_T_data;
uint8_T *gpu_out;
boolean_T im_gray_dirtyOnGpu;
boolean_T localBins3_dirtyOnGpu;
boolean_T localBins2_dirtyOnGpu;
boolean_T localBins1_dirtyOnGpu;
boolean_T cdf_dirtyOnGpu;
boolean_T T_size_dirtyOnGpu;
boolean_T localBins3_dirtyOnCpu;
boolean_T localBins2_dirtyOnCpu;
boolean_T localBins1_dirtyOnCpu;
boolean_T cdf_dirtyOnCpu;
boolean_T T_data_dirtyOnCpu;
boolean_T exitg1;
hipMalloc(&gpu_out, 921600ULL);
hipMalloc(&gpu_T_data, 771U * sizeof(real_T));
hipMalloc(&gpu_T_size, 8ULL);
hipMalloc(&gpu_y_size, 8ULL);
hipMalloc(&b_gpu_y_size, 8ULL);
hipMalloc(&gpu_ii_size, 4ULL);
hipMalloc(&gpu_localBins1, 2048ULL);
hipMalloc(&gpu_localBins2, 2048ULL);
hipMalloc(&gpu_localBins3, 2048ULL);
hipMalloc(&gpu_cdf, 2048ULL);
hipMalloc(&gpu_im_gray, 307200ULL);
hipMalloc(&gpu_b, 24ULL);
hipMalloc(&b_gpu_restoreOut, 921600ULL);
hipMalloc(&gpu_restoreOut, 7372800ULL);
hipMalloc(&gpu_y, 2457600ULL);
hipMalloc(&gpu_diff_im, 2457600ULL);
hipMalloc(&gpu_expanded, 2475552ULL);
hipMalloc(&gpu_darkChannel, 2457600ULL);
hipMalloc(&b_gpu_input, 7372800ULL);
hipMalloc(&gpu_input, 921600ULL);
T_data_dirtyOnCpu = false;
cdf_dirtyOnCpu = false;
localBins1_dirtyOnCpu = false;
localBins2_dirtyOnCpu = false;
localBins3_dirtyOnCpu = false;
// restoreOut is used to store the output of restoration
// Changing the precision level of input image to double
hipMemcpy((void *)gpu_input, (void *)&input[0], 921600ULL,
hipMemcpyHostToDevice);
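// Launch geometry follows the fixed 480x640x3 image: per-element RGB kernels
// use 921600/512 = 1800 blocks of 512 threads, single-plane kernels use
// 307200/512 = 600 blocks, and the padded 482x642 buffer uses 605 blocks.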
hipLaunchKernelGGL(( fog_rectification_kernel1), dim3(dim3(1800U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_input, b_gpu_input);
// Dark channel Estimation from input
hipLaunchKernelGGL(( fog_rectification_kernel2), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
b_gpu_input, gpu_darkChannel);
// diff_im is used as input and output variable for anisotropic diffusion
hipLaunchKernelGGL(( fog_rectification_kernel3), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_darkChannel, gpu_diff_im);
// 2D convolution mask for Anisotropic diffusion
// Refine dark channel using Anisotropic diffusion.
for (idx = 0; idx < 3; idx++) {
hipLaunchKernelGGL(( fog_rectification_kernel4), dim3(dim3(605U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_expanded);
hipLaunchKernelGGL(( fog_rectification_kernel5), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_diff_im, gpu_expanded);
hipMemcpyToSymbol(const_b, b, 72ULL, 0ULL, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( fog_rectification_kernel6), dim3(dim3(15U, 20U, 1U)), dim3(dim3(32U, 32U, 1U)), 0, 0,
gpu_expanded, gpu_diff_im);
}
// Reduction with min
hipLaunchKernelGGL(( fog_rectification_kernel7), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_diff_im, gpu_y);
// Parallel element-wise math to compute
// Restoration with inverse Koschmieder's law
hipLaunchKernelGGL(( fog_rectification_kernel8), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, gpu_y,
gpu_diff_im, gpu_darkChannel);
hipLaunchKernelGGL(( fog_rectification_kernel9), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_darkChannel, gpu_diff_im, b_gpu_input, gpu_restoreOut);
hipLaunchKernelGGL(( fog_rectification_kernel10), dim3(dim3(1800U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_restoreOut, b_gpu_restoreOut);
// %%%%%% stretching performs the histogram stretching of the image %%%%%%%
// %%%%%%%% im is the input color image and p is cdf limit
// %%%%% out is the contrast stretched image and cdf is the cumulative prob
// %%%%% density function and T is the stretching function
// rgbtograyconversion
hipMemcpy((void *)gpu_b, (void *)&b_b[0], 24ULL, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( fog_rectification_kernel11), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, gpu_b,
b_gpu_restoreOut, gpu_im_gray);
im_gray_dirtyOnGpu = true;
// histogram calculation
hipLaunchKernelGGL(( fog_rectification_kernel12), dim3(dim3(1U, 1U, 1U)), dim3(dim3(256U, 1U, 1U)), 0, 0,
gpu_localBins3, gpu_localBins2, gpu_localBins1, gpu_cdf);
cdf_dirtyOnGpu = true;
localBins1_dirtyOnGpu = true;
localBins2_dirtyOnGpu = true;
localBins3_dirtyOnGpu = true;
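// The grey-level histogram is accumulated on the host, unrolled by four into
// localBins1-3 and cdf; fog_rectification_kernel13 later sums the four partial
// histograms and normalises by 307200 to get per-bin probabilities, and the
// host loop below turns them into a cumulative distribution.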
for (i = 1; i + 3 <= 307200; i += 4) {
if (im_gray_dirtyOnGpu) {
hipMemcpy((void *)&im_gray[0], (void *)gpu_im_gray, 307200ULL,
hipMemcpyDeviceToHost);
im_gray_dirtyOnGpu = false;
}
if (localBins1_dirtyOnGpu) {
hipMemcpy((void *)&localBins1[0], (void *)gpu_localBins1, 2048ULL,
hipMemcpyDeviceToHost);
localBins1_dirtyOnGpu = false;
}
localBins1[im_gray[i - 1]]++;
localBins1_dirtyOnCpu = true;
if (localBins2_dirtyOnGpu) {
hipMemcpy((void *)&localBins2[0], (void *)gpu_localBins2, 2048ULL,
hipMemcpyDeviceToHost);
localBins2_dirtyOnGpu = false;
}
localBins2[im_gray[i]]++;
localBins2_dirtyOnCpu = true;
if (localBins3_dirtyOnGpu) {
hipMemcpy((void *)&localBins3[0], (void *)gpu_localBins3, 2048ULL,
hipMemcpyDeviceToHost);
localBins3_dirtyOnGpu = false;
}
localBins3[im_gray[i + 1]]++;
localBins3_dirtyOnCpu = true;
if (cdf_dirtyOnGpu) {
hipMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
hipMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
cdf[im_gray[i + 2]]++;
cdf_dirtyOnCpu = true;
}
for (idx = 0; idx < 307200; idx++) {
if (1 + idx >= i) {
if (im_gray_dirtyOnGpu) {
hipMemcpy((void *)&im_gray[0], (void *)gpu_im_gray, 307200ULL,
hipMemcpyDeviceToHost);
im_gray_dirtyOnGpu = false;
}
if (cdf_dirtyOnGpu) {
hipMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
hipMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
cdf[im_gray[idx]]++;
cdf_dirtyOnCpu = true;
}
}
// cumulative Sum calculation
if (localBins3_dirtyOnCpu) {
hipMemcpy((void *)gpu_localBins3, (void *)&localBins3[0], 2048ULL,
hipMemcpyHostToDevice);
}
if (localBins2_dirtyOnCpu) {
hipMemcpy((void *)gpu_localBins2, (void *)&localBins2[0], 2048ULL,
hipMemcpyHostToDevice);
}
if (localBins1_dirtyOnCpu) {
hipMemcpy((void *)gpu_localBins1, (void *)&localBins1[0], 2048ULL,
hipMemcpyHostToDevice);
}
if (cdf_dirtyOnCpu) {
hipMemcpy((void *)gpu_cdf, (void *)&cdf[0], 2048ULL, hipMemcpyHostToDevice);
}
hipLaunchKernelGGL(( fog_rectification_kernel13), dim3(dim3(1U, 1U, 1U)), dim3(dim3(256U, 1U, 1U)), 0, 0,
gpu_localBins3, gpu_localBins2, gpu_localBins1, gpu_cdf);
cdf_dirtyOnGpu = true;
for (i = 0; i < 255; i++) {
if (cdf_dirtyOnGpu) {
hipMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
hipMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
cdf[1 + i] += cdf[i];
}
// finding less than particular probability
idx = 0;
ii_size[0] = 256;
i = 1;
exitg1 = false;
while ((!exitg1) && (i < 257)) {
if (cdf_dirtyOnGpu) {
hipMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
hipMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
if (cdf[i - 1] <= 0.05) {
idx++;
if (idx >= 256) {
exitg1 = true;
} else {
i++;
}
} else {
i++;
}
}
if (1 > idx) {
varargin_1 = 0;
ii_size[0] = 0;
} else {
varargin_1 = idx;
ii_size[0] = idx;
}
idx = 0;
i = 1;
exitg1 = false;
while ((!exitg1) && (i < 257)) {
if (cdf_dirtyOnGpu) {
hipMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
hipMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
if (cdf[i - 1] >= 0.95) {
idx++;
if (idx >= 256) {
exitg1 = true;
} else {
i++;
}
} else {
i++;
}
}
if (1 > idx) {
i0 = 0;
b_ii_size[0] = 0;
} else {
i0 = idx;
b_ii_size[0] = idx;
}
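// varargin_1/ii_size[0] counts grey levels whose CDF is <= 0.05 and
// i0/b_ii_size[0] those with CDF >= 0.95; y, b_y and c_y are the slopes of a
// three-segment piecewise-linear stretching function T that maps the dark
// segment to [0,25], the mid segment to [25,229] and the bright segment to
// [229,255].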
y = 25.0 / (real_T)ii_size[0];
b_y = 204.0 / (255.0 - (real_T)(b_ii_size[0] + ii_size[0]));
if (255 - i0 < varargin_1 + 1) {
y_size[0] = 1;
y_size[1] = 0;
} else if (ii_size[0] + 1 == varargin_1 + 1) {
i1 = (int16_T)(varargin_1 + 1);
i2 = (int16_T)(255 - i0);
y_size[0] = 1;
y_size[1] = (int16_T)((int16_T)(255 - b_ii_size[0]) - (int16_T)(ii_size[0] +
1)) + 1;
for (i = 0; i <= (int32_T)(int16_T)(i2 - i1); i++) {
y_data[i] = (int16_T)((int16_T)(varargin_1 + 1) + i);
}
} else {
ndbl = (int32_T)::floor((254.0 - (real_T)(b_ii_size[0] + ii_size[0])) +
0.5);
i = varargin_1 + ndbl;
idx = (i + i0) - 254;
absb = (int32_T)std::abs(255.0 - (real_T)i0);
u0 = varargin_1 + 1;
if (u0 > absb) {
absb = u0;
}
if (std::abs((real_T)idx) < 4.4408920985006262E-16 * (real_T)absb) {
ndbl++;
u0 = 255 - i0;
} else if (idx > 0) {
u0 = varargin_1 + ndbl;
} else {
ndbl++;
u0 = i + 1;
}
if (ndbl >= 0) {
idx = ndbl;
} else {
idx = 0;
}
y_size[0] = 1;
y_size[1] = idx;
if (idx > 0) {
y_data[0] = (real_T)varargin_1 + 1.0;
if (idx > 1) {
y_data[idx - 1] = u0;
absb = (idx - 1) / 2;
for (i = 0; i < 126; i++) {
if (1 + i <= absb - 1) {
y_data[1 + i] = ((real_T)(varargin_1 + i) + 1.0) + 1.0;
y_data[(idx - i) - 2] = (u0 - i) - 1;
}
}
if (absb << 1 == idx - 1) {
y_data[absb] = ((real_T)(varargin_1 + u0) + 1.0) / 2.0;
} else {
y_data[absb] = (real_T)(varargin_1 + absb) + 1.0;
y_data[absb + 1] = u0 - absb;
}
}
}
}
c_y = 26.0 / (255.0 - (255.0 - (real_T)b_ii_size[0]));
if (255 < 256 - b_ii_size[0]) {
b_y_size[0] = 1;
b_y_size[1] = 0;
} else {
u1 = (uint32_T)((255.0 - (real_T)b_ii_size[0]) + 1.0);
b_y_size[0] = 1;
b_y_size[1] = (int32_T)(255.0 - ((255.0 - (real_T)i0) + 1.0)) + 1;
for (i = 0; i <= (int32_T)(255.0 - (real_T)u1); i++) {
if (cdf_dirtyOnGpu) {
hipMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
hipMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
cdf[i] = ((255.0 - (real_T)i0) + 1.0) + (real_T)i;
}
}
d_y = 204.0 / (255.0 - (real_T)(i0 + varargin_1)) * (real_T)varargin_1;
e_y = 26.0 / (255.0 - (255.0 - (real_T)i0)) * (255.0 - (real_T)b_ii_size[0]);
hipMemcpy((void *)gpu_y_size, (void *)&b_y_size[0], 8ULL,
hipMemcpyHostToDevice);
hipMemcpy((void *)b_gpu_y_size, (void *)&y_size[0], 8ULL,
hipMemcpyHostToDevice);
hipMemcpy((void *)gpu_ii_size, (void *)&ii_size[0], 4ULL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( fog_rectification_kernel14), dim3(dim3(1U, 1U, 1U)), dim3(dim3(32U, 1U, 1U)), 0, 0, gpu_y_size,
b_gpu_y_size, gpu_ii_size, gpu_T_size);
T_size_dirtyOnGpu = true;
for (i0 = 0; i0 <= varargin_1; i0++) {
if (T_size_dirtyOnGpu) {
hipMemcpy((void *)&T_size[0], (void *)gpu_T_size, 8ULL,
hipMemcpyDeviceToHost);
T_size_dirtyOnGpu = false;
}
T_data[i0] = y * (real_T)i0;
T_data_dirtyOnCpu = true;
}
i = y_size[1];
for (i0 = 0; i0 < i; i0++) {
if (T_size_dirtyOnGpu) {
hipMemcpy((void *)&T_size[0], (void *)gpu_T_size, 8ULL,
hipMemcpyDeviceToHost);
T_size_dirtyOnGpu = false;
}
T_data[(i0 + varargin_1) + 1] = (b_y * y_data[i0] - d_y) + 25.0;
T_data_dirtyOnCpu = true;
}
i = b_y_size[1];
for (i0 = 0; i0 < i; i0++) {
if (T_size_dirtyOnGpu) {
hipMemcpy((void *)&T_size[0], (void *)gpu_T_size, 8ULL,
hipMemcpyDeviceToHost);
T_size_dirtyOnGpu = false;
}
if (cdf_dirtyOnGpu) {
hipMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
hipMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
T_data[((i0 + varargin_1) + y_size[1]) + 1] = (c_y * cdf[i0] - e_y) + 229.0;
T_data_dirtyOnCpu = true;
}
if (T_size_dirtyOnGpu) {
hipMemcpy((void *)&T_size[0], (void *)gpu_T_size, 8ULL,
hipMemcpyDeviceToHost);
}
i = T_size[1];
if (T_data_dirtyOnCpu) {
hipMemcpy((void *)gpu_T_data, (void *)&T_data[0], T_size[0] * T_size[1] *
sizeof(real_T), hipMemcpyHostToDevice);
}
hipLaunchKernelGGL(( fog_rectification_kernel15), dim3(dim3(2U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0, i,
gpu_T_data);
// Replacing the value from look up table
hipLaunchKernelGGL(( fog_rectification_kernel16), dim3(dim3(1800U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
b_gpu_restoreOut);
hipLaunchKernelGGL(( fog_rectification_kernel17), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_T_data, b_gpu_restoreOut, gpu_out);
hipLaunchKernelGGL(( fog_rectification_kernel18), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_T_data, b_gpu_restoreOut, gpu_out);
hipLaunchKernelGGL(( fog_rectification_kernel19), dim3(dim3(600U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_T_data, b_gpu_restoreOut, gpu_out);
hipMemcpy((void *)&out[0], (void *)gpu_out, 921600ULL, hipMemcpyDeviceToHost);
hipFree(gpu_input);
hipFree(b_gpu_input);
hipFree(gpu_darkChannel);
hipFree(gpu_expanded);
hipFree(gpu_diff_im);
hipFree(gpu_y);
hipFree(gpu_restoreOut);
hipFree(b_gpu_restoreOut);
hipFree(gpu_b);
hipFree(gpu_im_gray);
hipFree(gpu_cdf);
hipFree(gpu_localBins3);
hipFree(gpu_localBins2);
hipFree(gpu_localBins1);
hipFree(gpu_ii_size);
hipFree(b_gpu_y_size);
hipFree(gpu_y_size);
hipFree(gpu_T_size);
hipFree(gpu_T_data);
hipFree(gpu_out);
}
//
// File trailer for fog_rectification.cu
//
// [EOF]
//
| a35662f6791a34df6c8ab5d2f8c3eed3ed34a5b2.cu | //
// Academic License - for use in teaching, academic research, and meeting
// course requirements at degree granting institutions only. Not for
// government, commercial, or other organizational use.
// File: fog_rectification.cu
//
// GPU Coder version : 1.0
// CUDA/C/C++ source code generated on : 25-Jan-2018 08:58:04
//
// Include Files
#include "rt_nonfinite.h"
#include "fog_rectification.h"
// Variable Definitions
__constant__ real_T const_b[9];
// Function Declarations
static __global__ void fog_rectification_kernel1(const uint8_T *input, real_T
*b_input);
static __global__ void fog_rectification_kernel10(real_T *restoreOut, uint8_T
*b_restoreOut);
static __global__ void fog_rectification_kernel11(const real_T *b, uint8_T
*restoreOut, uint8_T *im_gray);
static __global__ void fog_rectification_kernel12(real_T *localBins3, real_T
*localBins2, real_T *localBins1, real_T *cdf);
static __global__ void fog_rectification_kernel13(real_T *localBins3, real_T
*localBins2, real_T *localBins1, real_T *cdf);
static __global__ void fog_rectification_kernel14(int32_T *y_size, int32_T
*b_y_size, int32_T *ii_size, int32_T *T_size);
static __global__ void fog_rectification_kernel15(int32_T i, real_T *T_data);
static __global__ void fog_rectification_kernel16(uint8_T *restoreOut);
static __global__ void fog_rectification_kernel17(real_T *T_data, uint8_T
*restoreOut, uint8_T *out);
static __global__ void fog_rectification_kernel18(real_T *T_data, uint8_T
*restoreOut, uint8_T *out);
static __global__ void fog_rectification_kernel19(real_T *T_data, uint8_T
*restoreOut, uint8_T *out);
static __global__ void fog_rectification_kernel2(real_T *input, real_T
*darkChannel);
static __global__ void fog_rectification_kernel3(real_T *darkChannel, real_T
*diff_im);
static __global__ void fog_rectification_kernel4(real_T *expanded);
static __global__ void fog_rectification_kernel5(real_T *diff_im, real_T
*expanded);
static __global__ void fog_rectification_kernel6(real_T *expanded, real_T
*diff_im);
static __global__ void fog_rectification_kernel7(real_T *diff_im, real_T *y);
static __global__ void fog_rectification_kernel8(real_T *y, real_T *diff_im,
real_T *darkChannel);
static __global__ void fog_rectification_kernel9(real_T *darkChannel, real_T
*diff_im, real_T *input, real_T *restoreOut);
static __device__ real_T rt_roundd_snf(real_T u);
// Function Definitions
//
// Arguments : uint3 blockArg
// uint3 gridArg
// const uint8_T *input
// real_T *b_input
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel1(const
uint8_T *input, real_T *b_input)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 921600)) {
// restoreOut is used to store the output of restoration
// Changing the precision level of input image to double
b_input[j] = (real_T)input[j] / 255.0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *restoreOut
// uint8_T *b_restoreOut
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel10
(real_T *restoreOut, uint8_T *b_restoreOut)
{
real_T cv;
int32_T j;
uint8_T u0;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 921600)) {
cv = rt_roundd_snf(255.0 * restoreOut[j]);
if (cv < 256.0) {
if (cv >= 0.0) {
u0 = (uint8_T)cv;
} else {
u0 = 0;
}
} else if (cv >= 256.0) {
u0 = MAX_uint8_T;
} else {
u0 = 0;
}
b_restoreOut[j] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// const real_T *b
// uint8_T *restoreOut
// uint8_T *im_gray
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel11(
const real_T *b, uint8_T *restoreOut, uint8_T *im_gray)
{
uint8_T a[3];
int32_T j;
real_T cv;
int32_T n;
uint8_T u0;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 307200)) {
// %%%%%% stretching performs the histogram stretching of the image %%%%%%%
// %%%%%%%% im is the input color image and p is cdf limit
// %%%%% out is the contrast stretched image and cdf is the cumulative prob
// %%%%% density function and T is the stretching function
// rgbtograyconversion
a[0] = restoreOut[j];
a[1] = restoreOut[j + 307200];
a[2] = restoreOut[j + 614400];
cv = 0.0;
for (n = 0; n < 3; n++) {
cv += (real_T)a[n] * b[n];
}
cv = rt_roundd_snf(cv);
if (cv < 256.0) {
u0 = (uint8_T)cv;
} else {
u0 = MAX_uint8_T;
}
im_gray[j] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *localBins3
// real_T *localBins2
// real_T *localBins1
// real_T *cdf
// Return Type : void
//
static __global__ __launch_bounds__(256, 1) void fog_rectification_kernel12
(real_T *localBins3, real_T *localBins2, real_T *localBins1, real_T *cdf)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 256)) {
// histogram calculation
cdf[j] = 0.0;
localBins1[j] = 0.0;
localBins2[j] = 0.0;
localBins3[j] = 0.0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *localBins3
// real_T *localBins2
// real_T *localBins1
// real_T *cdf
// Return Type : void
//
static __global__ __launch_bounds__(256, 1) void fog_rectification_kernel13
(real_T *localBins3, real_T *localBins2, real_T *localBins1, real_T *cdf)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 256)) {
// cumulative Sum calculation
cdf[j] = ((cdf[j] + localBins1[j]) + localBins2[j]) + localBins3[j];
cdf[j] /= 307200.0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// int32_T *y_size
// int32_T *b_y_size
// int32_T *ii_size
// int32_T *T_size
// Return Type : void
//
static __global__ __launch_bounds__(32, 1) void fog_rectification_kernel14
(int32_T *y_size, int32_T *b_y_size, int32_T *ii_size, int32_T *T_size)
{
;
;
if (!(int32_T)((int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x *
blockIdx.y) + blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x) +
threadIdx.x) >= 1)) {
T_size[0] = 1;
T_size[1] = ((ii_size[0] + b_y_size[1]) + y_size[1]) + 1;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// int32_T i
// real_T *T_data
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel15
(int32_T i, real_T *T_data)
{
int32_T n;
;
;
n = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if ((!(int32_T)(n >= 768)) && ((int32_T)(1 + n <= i))) {
T_data[n] = floor(T_data[n]);
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// uint8_T *restoreOut
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel16
(uint8_T *restoreOut)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if ((!(int32_T)(j >= 921600)) && ((int32_T)((int32_T)restoreOut[j] == 0))) {
// Replacing the value from look up table
restoreOut[j] = 1;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *T_data
// uint8_T *restoreOut
// uint8_T *out
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel17
(real_T *T_data, uint8_T *restoreOut, uint8_T *out)
{
uint32_T threadId;
real_T cv;
int32_T j;
int32_T i0;
uint8_T u0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
cv = rt_roundd_snf(T_data[(int32_T)restoreOut[j + 480 * i0] - 1]);
if (cv < 256.0) {
if (cv >= 0.0) {
u0 = (uint8_T)cv;
} else {
u0 = 0;
}
} else if (cv >= 256.0) {
u0 = MAX_uint8_T;
} else {
u0 = 0;
}
out[j + 480 * i0] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *T_data
// uint8_T *restoreOut
// uint8_T *out
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel18
(real_T *T_data, uint8_T *restoreOut, uint8_T *out)
{
uint32_T threadId;
real_T cv;
int32_T j;
int32_T i0;
uint8_T u0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
cv = rt_roundd_snf(T_data[(int32_T)restoreOut[307200 + (j + 480 * i0)] - 1]);
if (cv < 256.0) {
if (cv >= 0.0) {
u0 = (uint8_T)cv;
} else {
u0 = 0;
}
} else if (cv >= 256.0) {
u0 = MAX_uint8_T;
} else {
u0 = 0;
}
out[307200 + (j + 480 * i0)] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *T_data
// uint8_T *restoreOut
// uint8_T *out
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel19
(real_T *T_data, uint8_T *restoreOut, uint8_T *out)
{
uint32_T threadId;
real_T cv;
int32_T j;
int32_T i0;
uint8_T u0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
cv = rt_roundd_snf(T_data[(int32_T)restoreOut[614400 + (j + 480 * i0)] - 1]);
if (cv < 256.0) {
if (cv >= 0.0) {
u0 = (uint8_T)cv;
} else {
u0 = 0;
}
} else if (cv >= 256.0) {
u0 = MAX_uint8_T;
} else {
u0 = 0;
}
out[614400 + (j + 480 * i0)] = u0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *input
// real_T *darkChannel
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel2
(real_T *input, real_T *darkChannel)
{
real_T cv;
int32_T j;
int32_T n;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 307200)) {
// Dark channel Estimation from input
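// darkChannel[j] = min over the R, G and B planes (offsets 0, 307200, 614400).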
cv = input[j];
for (n = j + 307201; n <= j + 614401; n += 307200) {
if (input[n - 1] < cv) {
cv = input[n - 1];
}
}
darkChannel[j] = cv;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *darkChannel
// real_T *diff_im
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel3
(real_T *darkChannel, real_T *diff_im)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 307200)) {
// diff_im is used as input and output variable for anisotropic diffusion
diff_im[j] = 0.9 * darkChannel[j];
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *expanded
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel4
(real_T *expanded)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 309444)) {
expanded[j] = 0.0;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *diff_im
// real_T *expanded
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel5
(real_T *diff_im, real_T *expanded)
{
uint32_T threadId;
int32_T j;
int32_T i0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
expanded[(j + 482 * (1 + i0)) + 1] = diff_im[j + 480 * i0];
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *expanded
// real_T *diff_im
// Return Type : void
//
static __global__ __launch_bounds__(1024, 1) void fog_rectification_kernel6
(real_T *expanded, real_T *diff_im)
{
real_T cv;
int32_T n;
int32_T j;
int32_T threadIdY;
int32_T threadIdX;
__shared__ real_T expanded_shared[1156];
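// 1156 = 34*34: one 32x32 output tile plus a one-pixel halo on every side,
// staged in shared memory for the 3x3 convolution with const_b.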
int32_T baseR;
int32_T srow;
int32_T strideRow;
int32_T scol;
int32_T strideCol;
int32_T y_idx;
int32_T baseC;
int32_T x_idx;
;
;
threadIdY = (int32_T)(blockDim.y * blockIdx.y + threadIdx.y);
threadIdX = (int32_T)(blockDim.x * blockIdx.x + threadIdx.x);
baseR = threadIdX;
srow = (int32_T)threadIdx.x;
strideRow = (int32_T)blockDim.x;
scol = (int32_T)threadIdx.y;
strideCol = (int32_T)blockDim.y;
for (y_idx = srow; y_idx <= 33; y_idx += strideRow) {
baseC = threadIdY;
for (x_idx = scol; x_idx <= 33; x_idx += strideCol) {
if (((int32_T)(((int32_T)(baseR >= 0)) && ((int32_T)(baseR < 482)))) &&
((int32_T)(((int32_T)(baseC >= 0)) && ((int32_T)(baseC < 642))))) {
expanded_shared[y_idx + 34 * x_idx] = (real_T)expanded[482 * baseC +
baseR];
} else {
expanded_shared[y_idx + 34 * x_idx] = 0.0;
}
baseC += strideCol;
}
baseR += strideRow;
}
__syncthreads();
if ((!(int32_T)(threadIdX >= 480)) && (!(int32_T)(threadIdY >= 640))) {
cv = 0.0;
for (n = 0; n < 3; n++) {
for (j = 0; j < 3; j++) {
cv += expanded_shared[((int32_T)threadIdx.x + ((j + threadIdX) -
threadIdX)) + 34 * ((int32_T)threadIdx.y + ((n + threadIdY) -
threadIdY))] * const_b[(3 * (2 - n) - j) + 2];
}
}
diff_im[threadIdX + 480 * threadIdY] = cv;
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *diff_im
// real_T *y
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel7
(real_T *diff_im, real_T *y)
{
int32_T j;
;
;
j = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(j >= 307200)) {
// Reduction with min
y[j] = diff_im[j];
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *y
// real_T *diff_im
// real_T *darkChannel
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel8
(real_T *y, real_T *diff_im, real_T *darkChannel)
{
real_T u1;
int32_T n;
;
;
n = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(n >= 307200)) {
// Parallel element-wise math to compute
// Restoration with inverse Koschmieder's law
u1 = y[n];
if (darkChannel[n] < y[n]) {
u1 = darkChannel[n];
}
diff_im[n] = u1;
diff_im[n] *= 0.6;
darkChannel[n] = 1.0 / (1.0 - diff_im[n]);
}
}
//
// Arguments : uint3 blockArg
// uint3 gridArg
// real_T *darkChannel
// real_T *diff_im
// real_T *input
// real_T *restoreOut
// Return Type : void
//
static __global__ __launch_bounds__(512, 1) void fog_rectification_kernel9
(real_T *darkChannel, real_T *diff_im, real_T *input, real_T *restoreOut)
{
uint32_T threadId;
int32_T j;
int32_T i0;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
i0 = (int32_T)(threadId / 480U);
j = (int32_T)(threadId - (uint32_T)i0 * 480U);
if ((!(int32_T)(j >= 480)) && (!(int32_T)(i0 >= 640))) {
restoreOut[j + 480 * i0] = (input[j + 480 * i0] - diff_im[j + 480 * i0]) *
darkChannel[j + 480 * i0];
restoreOut[307200 + (j + 480 * i0)] = (input[307200 + (j + 480 * i0)] -
diff_im[j + 480 * i0]) * darkChannel[j + 480 * i0];
restoreOut[614400 + (j + 480 * i0)] = (input[614400 + (j + 480 * i0)] -
diff_im[j + 480 * i0]) * darkChannel[j + 480 * i0];
}
}
//
// Arguments : real_T u
// Return Type : real_T
//
static __device__ real_T rt_roundd_snf(real_T u)
{
real_T y;
if (fabs(u) < 4.503599627370496E+15) {
if (u >= 0.5) {
y = floor(u + 0.5);
} else if (u > -0.5) {
y = u * 0.0;
} else {
y = ceil(u - 0.5);
}
} else {
y = u;
}
return y;
}
//
// Copyright 2017 The MathWorks, Inc.
// Arguments : const uint8_T input[921600]
// uint8_T out[921600]
// Return Type : void
//
void fog_rectification(const uint8_T input[921600], uint8_T out[921600])
{
int32_T idx;
int32_T i0;
int32_T i;
static const real_T b[9] = { 0.0625, 0.125, 0.0625, 0.125, 0.25, 0.125, 0.0625,
0.125, 0.0625 };
static uint8_T im_gray[307200];
static const real_T b_b[3] = { 0.29893602129377539, 0.58704307445112125,
0.11402090425510336 };
real_T cdf[256];
real_T localBins1[256];
real_T localBins2[256];
real_T localBins3[256];
int32_T ii_size[1];
int32_T varargin_1;
int32_T b_ii_size[1];
real_T y;
real_T b_y;
real_T y_data[255];
int32_T y_size[2];
int32_T ndbl;
int16_T i1;
int16_T i2;
real_T c_y;
int32_T absb;
int32_T u0;
uint32_T u1;
int32_T b_y_size[2];
real_T d_y;
real_T e_y;
int32_T T_size[2];
real_T T_data[771];
uint8_T *gpu_input;
real_T *b_gpu_input;
real_T *gpu_darkChannel;
real_T *gpu_diff_im;
real_T *gpu_expanded;
real_T *gpu_y;
real_T *gpu_restoreOut;
uint8_T *b_gpu_restoreOut;
real_T *gpu_b;
uint8_T *gpu_im_gray;
real_T *gpu_localBins3;
real_T *gpu_localBins2;
real_T *gpu_localBins1;
real_T *gpu_cdf;
int32_T *gpu_y_size;
int32_T *b_gpu_y_size;
int32_T *gpu_ii_size;
int32_T *gpu_T_size;
real_T *gpu_T_data;
uint8_T *gpu_out;
boolean_T im_gray_dirtyOnGpu;
boolean_T localBins3_dirtyOnGpu;
boolean_T localBins2_dirtyOnGpu;
boolean_T localBins1_dirtyOnGpu;
boolean_T cdf_dirtyOnGpu;
boolean_T T_size_dirtyOnGpu;
boolean_T localBins3_dirtyOnCpu;
boolean_T localBins2_dirtyOnCpu;
boolean_T localBins1_dirtyOnCpu;
boolean_T cdf_dirtyOnCpu;
boolean_T T_data_dirtyOnCpu;
boolean_T exitg1;
cudaMalloc(&gpu_out, 921600ULL);
cudaMalloc(&gpu_T_data, 771U * sizeof(real_T));
cudaMalloc(&gpu_T_size, 8ULL);
cudaMalloc(&gpu_y_size, 8ULL);
cudaMalloc(&b_gpu_y_size, 8ULL);
cudaMalloc(&gpu_ii_size, 4ULL);
cudaMalloc(&gpu_localBins1, 2048ULL);
cudaMalloc(&gpu_localBins2, 2048ULL);
cudaMalloc(&gpu_localBins3, 2048ULL);
cudaMalloc(&gpu_cdf, 2048ULL);
cudaMalloc(&gpu_im_gray, 307200ULL);
cudaMalloc(&gpu_b, 24ULL);
cudaMalloc(&b_gpu_restoreOut, 921600ULL);
cudaMalloc(&gpu_restoreOut, 7372800ULL);
cudaMalloc(&gpu_y, 2457600ULL);
cudaMalloc(&gpu_diff_im, 2457600ULL);
cudaMalloc(&gpu_expanded, 2475552ULL);
cudaMalloc(&gpu_darkChannel, 2457600ULL);
cudaMalloc(&b_gpu_input, 7372800ULL);
cudaMalloc(&gpu_input, 921600ULL);
T_data_dirtyOnCpu = false;
cdf_dirtyOnCpu = false;
localBins1_dirtyOnCpu = false;
localBins2_dirtyOnCpu = false;
localBins3_dirtyOnCpu = false;
// restoreOut is used to store the output of restoration
// Changing the precision level of input image to double
cudaMemcpy((void *)gpu_input, (void *)&input[0], 921600ULL,
cudaMemcpyHostToDevice);
fog_rectification_kernel1<<<dim3(1800U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_input, b_gpu_input);
// Dark channel Estimation from input
fog_rectification_kernel2<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(b_gpu_input, gpu_darkChannel);
// diff_im is used as input and output variable for anisotropic diffusion
fog_rectification_kernel3<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_darkChannel, gpu_diff_im);
// 2D convolution mask for Anisotropic diffusion
// Refine dark channel using Anisotropic diffusion.
for (idx = 0; idx < 3; idx++) {
fog_rectification_kernel4<<<dim3(605U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_expanded);
fog_rectification_kernel5<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_diff_im, gpu_expanded);
cudaMemcpyToSymbol(const_b, b, 72ULL, 0ULL, cudaMemcpyHostToDevice);
fog_rectification_kernel6<<<dim3(15U, 20U, 1U), dim3(32U, 32U, 1U)>>>
(gpu_expanded, gpu_diff_im);
}
// Reduction with min
fog_rectification_kernel7<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_diff_im, gpu_y);
// Parallel element-wise math to compute
// Restoration with inverse Koschmieder's law
fog_rectification_kernel8<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>(gpu_y,
gpu_diff_im, gpu_darkChannel);
fog_rectification_kernel9<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_darkChannel, gpu_diff_im, b_gpu_input, gpu_restoreOut);
fog_rectification_kernel10<<<dim3(1800U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_restoreOut, b_gpu_restoreOut);
// %%%%%% stretching performs the histogram stretching of the image %%%%%%%
// %%%%%%%% im is the input color image and p is cdf limit
// %%%%% out is the contrast stretched image and cdf is the cumulative prob
// %%%%% density function and T is the stretching function
// rgbtograyconversion
cudaMemcpy((void *)gpu_b, (void *)&b_b[0], 24ULL, cudaMemcpyHostToDevice);
fog_rectification_kernel11<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>(gpu_b,
b_gpu_restoreOut, gpu_im_gray);
im_gray_dirtyOnGpu = true;
// histogram calculation
fog_rectification_kernel12<<<dim3(1U, 1U, 1U), dim3(256U, 1U, 1U)>>>
(gpu_localBins3, gpu_localBins2, gpu_localBins1, gpu_cdf);
cdf_dirtyOnGpu = true;
localBins1_dirtyOnGpu = true;
localBins2_dirtyOnGpu = true;
localBins3_dirtyOnGpu = true;
for (i = 1; i + 3 <= 307200; i += 4) {
if (im_gray_dirtyOnGpu) {
cudaMemcpy((void *)&im_gray[0], (void *)gpu_im_gray, 307200ULL,
cudaMemcpyDeviceToHost);
im_gray_dirtyOnGpu = false;
}
if (localBins1_dirtyOnGpu) {
cudaMemcpy((void *)&localBins1[0], (void *)gpu_localBins1, 2048ULL,
cudaMemcpyDeviceToHost);
localBins1_dirtyOnGpu = false;
}
localBins1[im_gray[i - 1]]++;
localBins1_dirtyOnCpu = true;
if (localBins2_dirtyOnGpu) {
cudaMemcpy((void *)&localBins2[0], (void *)gpu_localBins2, 2048ULL,
cudaMemcpyDeviceToHost);
localBins2_dirtyOnGpu = false;
}
localBins2[im_gray[i]]++;
localBins2_dirtyOnCpu = true;
if (localBins3_dirtyOnGpu) {
cudaMemcpy((void *)&localBins3[0], (void *)gpu_localBins3, 2048ULL,
cudaMemcpyDeviceToHost);
localBins3_dirtyOnGpu = false;
}
localBins3[im_gray[i + 1]]++;
localBins3_dirtyOnCpu = true;
if (cdf_dirtyOnGpu) {
cudaMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
cudaMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
cdf[im_gray[i + 2]]++;
cdf_dirtyOnCpu = true;
}
for (idx = 0; idx < 307200; idx++) {
if (1 + idx >= i) {
if (im_gray_dirtyOnGpu) {
cudaMemcpy((void *)&im_gray[0], (void *)gpu_im_gray, 307200ULL,
cudaMemcpyDeviceToHost);
im_gray_dirtyOnGpu = false;
}
if (cdf_dirtyOnGpu) {
cudaMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
cudaMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
cdf[im_gray[idx]]++;
cdf_dirtyOnCpu = true;
}
}
// cumulative Sum calculation
if (localBins3_dirtyOnCpu) {
cudaMemcpy((void *)gpu_localBins3, (void *)&localBins3[0], 2048ULL,
cudaMemcpyHostToDevice);
}
if (localBins2_dirtyOnCpu) {
cudaMemcpy((void *)gpu_localBins2, (void *)&localBins2[0], 2048ULL,
cudaMemcpyHostToDevice);
}
if (localBins1_dirtyOnCpu) {
cudaMemcpy((void *)gpu_localBins1, (void *)&localBins1[0], 2048ULL,
cudaMemcpyHostToDevice);
}
if (cdf_dirtyOnCpu) {
cudaMemcpy((void *)gpu_cdf, (void *)&cdf[0], 2048ULL, cudaMemcpyHostToDevice);
}
fog_rectification_kernel13<<<dim3(1U, 1U, 1U), dim3(256U, 1U, 1U)>>>
(gpu_localBins3, gpu_localBins2, gpu_localBins1, gpu_cdf);
cdf_dirtyOnGpu = true;
for (i = 0; i < 255; i++) {
if (cdf_dirtyOnGpu) {
cudaMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
cudaMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
cdf[1 + i] += cdf[i];
}
// finding less than particular probability
idx = 0;
ii_size[0] = 256;
i = 1;
exitg1 = false;
while ((!exitg1) && (i < 257)) {
if (cdf_dirtyOnGpu) {
cudaMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
cudaMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
if (cdf[i - 1] <= 0.05) {
idx++;
if (idx >= 256) {
exitg1 = true;
} else {
i++;
}
} else {
i++;
}
}
if (1 > idx) {
varargin_1 = 0;
ii_size[0] = 0;
} else {
varargin_1 = idx;
ii_size[0] = idx;
}
idx = 0;
i = 1;
exitg1 = false;
while ((!exitg1) && (i < 257)) {
if (cdf_dirtyOnGpu) {
cudaMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
cudaMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
if (cdf[i - 1] >= 0.95) {
idx++;
if (idx >= 256) {
exitg1 = true;
} else {
i++;
}
} else {
i++;
}
}
if (1 > idx) {
i0 = 0;
b_ii_size[0] = 0;
} else {
i0 = idx;
b_ii_size[0] = idx;
}
y = 25.0 / (real_T)ii_size[0];
b_y = 204.0 / (255.0 - (real_T)(b_ii_size[0] + ii_size[0]));
if (255 - i0 < varargin_1 + 1) {
y_size[0] = 1;
y_size[1] = 0;
} else if (ii_size[0] + 1 == varargin_1 + 1) {
i1 = (int16_T)(varargin_1 + 1);
i2 = (int16_T)(255 - i0);
y_size[0] = 1;
y_size[1] = (int16_T)((int16_T)(255 - b_ii_size[0]) - (int16_T)(ii_size[0] +
1)) + 1;
for (i = 0; i <= (int32_T)(int16_T)(i2 - i1); i++) {
y_data[i] = (int16_T)((int16_T)(varargin_1 + 1) + i);
}
} else {
ndbl = (int32_T)std::floor((254.0 - (real_T)(b_ii_size[0] + ii_size[0])) +
0.5);
i = varargin_1 + ndbl;
idx = (i + i0) - 254;
absb = (int32_T)std::abs(255.0 - (real_T)i0);
u0 = varargin_1 + 1;
if (u0 > absb) {
absb = u0;
}
if (std::abs((real_T)idx) < 4.4408920985006262E-16 * (real_T)absb) {
ndbl++;
u0 = 255 - i0;
} else if (idx > 0) {
u0 = varargin_1 + ndbl;
} else {
ndbl++;
u0 = i + 1;
}
if (ndbl >= 0) {
idx = ndbl;
} else {
idx = 0;
}
y_size[0] = 1;
y_size[1] = idx;
if (idx > 0) {
y_data[0] = (real_T)varargin_1 + 1.0;
if (idx > 1) {
y_data[idx - 1] = u0;
absb = (idx - 1) / 2;
for (i = 0; i < 126; i++) {
if (1 + i <= absb - 1) {
y_data[1 + i] = ((real_T)(varargin_1 + i) + 1.0) + 1.0;
y_data[(idx - i) - 2] = (u0 - i) - 1;
}
}
if (absb << 1 == idx - 1) {
y_data[absb] = ((real_T)(varargin_1 + u0) + 1.0) / 2.0;
} else {
y_data[absb] = (real_T)(varargin_1 + absb) + 1.0;
y_data[absb + 1] = u0 - absb;
}
}
}
}
c_y = 26.0 / (255.0 - (255.0 - (real_T)b_ii_size[0]));
if (255 < 256 - b_ii_size[0]) {
b_y_size[0] = 1;
b_y_size[1] = 0;
} else {
u1 = (uint32_T)((255.0 - (real_T)b_ii_size[0]) + 1.0);
b_y_size[0] = 1;
b_y_size[1] = (int32_T)(255.0 - ((255.0 - (real_T)i0) + 1.0)) + 1;
for (i = 0; i <= (int32_T)(255.0 - (real_T)u1); i++) {
if (cdf_dirtyOnGpu) {
cudaMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
cudaMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
cdf[i] = ((255.0 - (real_T)i0) + 1.0) + (real_T)i;
}
}
d_y = 204.0 / (255.0 - (real_T)(i0 + varargin_1)) * (real_T)varargin_1;
e_y = 26.0 / (255.0 - (255.0 - (real_T)i0)) * (255.0 - (real_T)b_ii_size[0]);
cudaMemcpy((void *)gpu_y_size, (void *)&b_y_size[0], 8ULL,
cudaMemcpyHostToDevice);
cudaMemcpy((void *)b_gpu_y_size, (void *)&y_size[0], 8ULL,
cudaMemcpyHostToDevice);
cudaMemcpy((void *)gpu_ii_size, (void *)&ii_size[0], 4ULL,
cudaMemcpyHostToDevice);
fog_rectification_kernel14<<<dim3(1U, 1U, 1U), dim3(32U, 1U, 1U)>>>(gpu_y_size,
b_gpu_y_size, gpu_ii_size, gpu_T_size);
T_size_dirtyOnGpu = true;
for (i0 = 0; i0 <= varargin_1; i0++) {
if (T_size_dirtyOnGpu) {
cudaMemcpy((void *)&T_size[0], (void *)gpu_T_size, 8ULL,
cudaMemcpyDeviceToHost);
T_size_dirtyOnGpu = false;
}
T_data[i0] = y * (real_T)i0;
T_data_dirtyOnCpu = true;
}
i = y_size[1];
for (i0 = 0; i0 < i; i0++) {
if (T_size_dirtyOnGpu) {
cudaMemcpy((void *)&T_size[0], (void *)gpu_T_size, 8ULL,
cudaMemcpyDeviceToHost);
T_size_dirtyOnGpu = false;
}
T_data[(i0 + varargin_1) + 1] = (b_y * y_data[i0] - d_y) + 25.0;
T_data_dirtyOnCpu = true;
}
i = b_y_size[1];
for (i0 = 0; i0 < i; i0++) {
if (T_size_dirtyOnGpu) {
cudaMemcpy((void *)&T_size[0], (void *)gpu_T_size, 8ULL,
cudaMemcpyDeviceToHost);
T_size_dirtyOnGpu = false;
}
if (cdf_dirtyOnGpu) {
cudaMemcpy((void *)&cdf[0], (void *)gpu_cdf, 2048ULL,
cudaMemcpyDeviceToHost);
cdf_dirtyOnGpu = false;
}
T_data[((i0 + varargin_1) + y_size[1]) + 1] = (c_y * cdf[i0] - e_y) + 229.0;
T_data_dirtyOnCpu = true;
}
if (T_size_dirtyOnGpu) {
cudaMemcpy((void *)&T_size[0], (void *)gpu_T_size, 8ULL,
cudaMemcpyDeviceToHost);
}
i = T_size[1];
if (T_data_dirtyOnCpu) {
cudaMemcpy((void *)gpu_T_data, (void *)&T_data[0], T_size[0] * T_size[1] *
sizeof(real_T), cudaMemcpyHostToDevice);
}
fog_rectification_kernel15<<<dim3(2U, 1U, 1U), dim3(512U, 1U, 1U)>>>(i,
gpu_T_data);
// Replacing the value from look up table
fog_rectification_kernel16<<<dim3(1800U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(b_gpu_restoreOut);
fog_rectification_kernel17<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_T_data, b_gpu_restoreOut, gpu_out);
fog_rectification_kernel18<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_T_data, b_gpu_restoreOut, gpu_out);
fog_rectification_kernel19<<<dim3(600U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_T_data, b_gpu_restoreOut, gpu_out);
cudaMemcpy((void *)&out[0], (void *)gpu_out, 921600ULL, cudaMemcpyDeviceToHost);
cudaFree(gpu_input);
cudaFree(b_gpu_input);
cudaFree(gpu_darkChannel);
cudaFree(gpu_expanded);
cudaFree(gpu_diff_im);
cudaFree(gpu_y);
cudaFree(gpu_restoreOut);
cudaFree(b_gpu_restoreOut);
cudaFree(gpu_b);
cudaFree(gpu_im_gray);
cudaFree(gpu_cdf);
cudaFree(gpu_localBins3);
cudaFree(gpu_localBins2);
cudaFree(gpu_localBins1);
cudaFree(gpu_ii_size);
cudaFree(b_gpu_y_size);
cudaFree(gpu_y_size);
cudaFree(gpu_T_size);
cudaFree(gpu_T_data);
cudaFree(gpu_out);
}
//
// File trailer for fog_rectification.cu
//
// [EOF]
//
|
998a432d4f9c0b06d6bc2ba16c56caed525533d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// \file logistic.cu
///
#include "logistic.hpp"
using namespace std;
template <typename Dtype>
Logistic<Dtype>::Logistic(FullConnectParam* fcp) {
this->_fcp = fcp;
}
template <typename Dtype>
Logistic<Dtype>::~Logistic<Dtype>() {
delete this->_y;
delete[] h_labels;
delete[] y_CPU;
delete[] correct_probs;
delete d_max_pos_of_out;
delete[] h_max_pos_of_out;
delete _d_record;
delete[] _h_record;
}
template <typename Dtype>
void Logistic<Dtype>::initCuda() {
this->_y = new Matrix<Dtype>(this->_fcp->getMinibatchSize(), \
this->_fcp->getNumOut());
h_labels = new int[this->_fcp->getMinibatchSize()];
y_CPU = new Dtype[this->_y->getNumEles()];
correct_probs = new Dtype[this->_y->getNumRows()];
d_max_pos_of_out = new Matrix<Dtype>(this->_y->getNumRows(), 1);
h_max_pos_of_out = new Dtype[this->_y->getNumRows()];
_d_record = new Matrix<int>(this->_y->getNumCols(), this->_y->getNumCols());
_h_record = new int[this->_y->getNumCols() * this->_y->getNumCols()];
}
template <typename Dtype>
void Logistic<Dtype>::computeOutput(Matrix<Dtype>* x){
this->_y->zeros();
x->apply(Matrix<Dtype>::SOFTMAX, this->_y);
}
template <typename Dtype>
double Logistic<Dtype>::computeError(Matrix<int>* labels, int& num_error){
labels->copyToHost(h_labels, labels->getNumEles());
this->_y->copyToHost(y_CPU, this->_y->getNumEles());
/// record the likelihood at the position of the maximum found
/// record the index of the maximum position
this->_y->maxPosInRow(d_max_pos_of_out);
d_max_pos_of_out->copyToHost(h_max_pos_of_out, this->_y->getNumRows());
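// For every sample: accumulate the log-likelihood of the true class (clamped
// to -10000 when the predicted probability is exactly zero), count
// misclassifications, and update the confusion matrix _h_record
// (row = predicted label, column = true label).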
for (int c = 0; c < this->_y->getNumRows(); c++) {
int true_label = h_labels[c];
int predict_label = h_max_pos_of_out[c];
if(y_CPU[c*this->_y->getNumCols()+true_label] == 0)
correct_probs[c] = -10000;
else
correct_probs[c] = log(y_CPU[c * this->_y->getNumCols() + true_label]);
if(predict_label != true_label)
num_error++;
_h_record[predict_label * this->_y->getNumCols() + true_label]++ ;
}
double result = 0;
for(int i = 0; i < labels->getNumEles(); i++){
result -= correct_probs[i];
}
return result;
}
template <typename Dtype>
void Logistic<Dtype>::computeDerivsOfInput(Matrix<Dtype>* dE_dx, Matrix<int>* labels){
assert(labels->getNumRows() == dE_dx->getNumRows());
dE_dx->zeros();
const int num_thread = DIVUP(this->_fcp->getNumOut(), ADD_BLOCK_SIZE) * ADD_BLOCK_SIZE;
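	// One block per minibatch sample; the output count is rounded up to a
	// multiple of ADD_BLOCK_SIZE so every output unit gets a thread.
	// compute_dE_dy (defined elsewhere) is assumed to fill dE_dx with the
	// gradient of the softmax/cross-entropy loss w.r.t. the layer input.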
hipLaunchKernelGGL(( compute_dE_dy), dim3(this->_fcp->getMinibatchSize()), dim3(num_thread), 0, 0, this->_y->getDevData(), \
labels->getDevData(), dE_dx->getDevData(), this->_fcp->getNumOut());
hipDeviceSynchronize();
cudaCheckError();
}
| 998a432d4f9c0b06d6bc2ba16c56caed525533d4.cu | ///
/// \file logistic.cu
///
#include "logistic.hpp"
using namespace std;
template <typename Dtype>
Logistic<Dtype>::Logistic(FullConnectParam* fcp) {
this->_fcp = fcp;
}
template <typename Dtype>
Logistic<Dtype>::~Logistic<Dtype>() {
delete this->_y;
delete[] h_labels;
delete[] y_CPU;
delete[] correct_probs;
delete d_max_pos_of_out;
delete[] h_max_pos_of_out;
delete _d_record;
delete[] _h_record;
}
template <typename Dtype>
void Logistic<Dtype>::initCuda() {
this->_y = new Matrix<Dtype>(this->_fcp->getMinibatchSize(), \
this->_fcp->getNumOut());
h_labels = new int[this->_fcp->getMinibatchSize()];
y_CPU = new Dtype[this->_y->getNumEles()];
correct_probs = new Dtype[this->_y->getNumRows()];
d_max_pos_of_out = new Matrix<Dtype>(this->_y->getNumRows(), 1);
h_max_pos_of_out = new Dtype[this->_y->getNumRows()];
_d_record = new Matrix<int>(this->_y->getNumCols(), this->_y->getNumCols());
_h_record = new int[this->_y->getNumCols() * this->_y->getNumCols()];
}
template <typename Dtype>
void Logistic<Dtype>::computeOutput(Matrix<Dtype>* x){
this->_y->zeros();
x->apply(Matrix<Dtype>::SOFTMAX, this->_y);
}
template <typename Dtype>
double Logistic<Dtype>::computeError(Matrix<int>* labels, int& num_error){
labels->copyToHost(h_labels, labels->getNumEles());
this->_y->copyToHost(y_CPU, this->_y->getNumEles());
/// record the likelihood at the position of the maximum found
/// record the index of the maximum position
this->_y->maxPosInRow(d_max_pos_of_out);
d_max_pos_of_out->copyToHost(h_max_pos_of_out, this->_y->getNumRows());
for (int c = 0; c < this->_y->getNumRows(); c++) {
int true_label = h_labels[c];
int predict_label = h_max_pos_of_out[c];
if(y_CPU[c*this->_y->getNumCols()+true_label] == 0)
correct_probs[c] = -10000;
else
correct_probs[c] = log(y_CPU[c * this->_y->getNumCols() + true_label]);
if(predict_label != true_label)
num_error++;
_h_record[predict_label * this->_y->getNumCols() + true_label]++ ;
}
double result = 0;
for(int i = 0; i < labels->getNumEles(); i++){
result -= correct_probs[i];
}
return result;
}
template <typename Dtype>
void Logistic<Dtype>::computeDerivsOfInput(Matrix<Dtype>* dE_dx, Matrix<int>* labels){
assert(labels->getNumRows() == dE_dx->getNumRows());
dE_dx->zeros();
const int num_thread = DIVUP(this->_fcp->getNumOut(), ADD_BLOCK_SIZE) * ADD_BLOCK_SIZE;
compute_dE_dy<<<this->_fcp->getMinibatchSize(), num_thread>>>(this->_y->getDevData(), \
labels->getDevData(), dE_dx->getDevData(), this->_fcp->getNumOut());
cudaThreadSynchronize();
cudaCheckError();
}
|
d527d6d0ef013fecbc2154e2c1e1fb1659f5138c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define NXPROB 20480 /* x dimension of problem grid */
#define NYPROB 32768 /* y dimension of problem grid */
#define STEPS 500 /* number of time steps */
#define MAXWORKER 8 /* maximum number of worker tasks */
#define MINWORKER 3 /* minimum number of worker tasks */
#define BEGIN 1 /* message tag */
#define LTAG 2 /* message tag */
#define RTAG 3 /* message tag */
#define NONE 0 /* indicates no neighbor */
#define DONE 4 /* message tag */
#define MASTER 0 /* taskid of first process */
#define BLOCK_H 10
#define BLOCK_V 8
#define THREADS 32
struct Parms {
float cx;
float cy;
} parms = {0.1, 0.1};
/**************************************************************************
* subroutine update
****************************************************************************/
void update(int start, int end, int ny, float *u1, float *u2)
{
int ix, iy;
for (ix = start; ix <= end; ix++)
for (iy = 1; iy <= ny-2; iy++)
*(u2+ix*ny+iy) = *(u1+ix*ny+iy) +
parms.cx * (*(u1+(ix+1)*ny+iy) +
*(u1+(ix-1)*ny+iy) -
2.0 * *(u1+ix*ny+iy)) +
parms.cy * (*(u1+ix*ny+iy+1) +
*(u1+ix*ny+iy-1) -
2.0 * *(u1+ix*ny+iy));
}
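/* Both the CPU and GPU updates apply the same explicit (FTCS) stencil:
   u2[ix][iy] = u1[ix][iy]
              + cx * (u1[ix+1][iy] + u1[ix-1][iy] - 2*u1[ix][iy])
              + cy * (u1[ix][iy+1] + u1[ix][iy-1] - 2*u1[ix][iy])            */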
/*****************************************************************************
* subroutine inidat
*****************************************************************************/
void inidat(int nx, int ny, float *u) {
int ix, iy;
for (ix = 0; ix <= nx-1; ix++)
for (iy = 0; iy <= ny-1; iy++)
{*(u+ix*ny+iy) = (float)(ix * (nx - ix - 1) * iy * (ny - iy - 1)%1000);
//if (*(u+ix*ny+iy) > 10000.0)
//printf("%f\n", *(u+ix*ny+iy));
}
}
/**************************************************************************
* subroutine prtdat
**************************************************************************/
void prtdat(int nx, int ny, float *u1, const char *fnam) {
int ix, iy;
FILE *fp;
fp = fopen(fnam, "w");
for (iy = ny-1; iy >= 0; iy--) {
for (ix = 0; ix <= nx-1; ix++) {
fprintf(fp, "%6.1f", *(u1+ix*ny+iy));
if (ix != nx-1)
fprintf(fp, " ");
else
fprintf(fp, "\n");
}
}
fclose(fp);
}
__global__ void cuda_update(float *u0, float *u1, struct Parms parms)
{
int ix, iy;
int gid = blockIdx.x * blockDim.x + threadIdx.x; /* 1-D launch: one thread per grid point */
ix = gid / NYPROB;
iy = gid % NYPROB;
if (ix > 0 && iy > 0)
{
if (ix < NXPROB - 1 && iy < NYPROB - 1)
{
*(u1+ix*NYPROB+iy) = *(u0+ix*NYPROB+iy) +
parms.cx * (*(u0+(ix+1)*NYPROB+iy) +
*(u0+(ix-1)*NYPROB+iy) -
2.0 * *(u0+ix*NYPROB+iy)) +
parms.cy * (*(u0+ix*NYPROB+iy+1) +
*(u0+ix*NYPROB+iy-1) -
2.0 * *(u0+ix*NYPROB+iy));
}
}
}
__global__ void MyKernel(int *a, int *b, int *c, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N)
{
c[idx] = a[idx] + b[idx];
}
}
int main (int argc, char *argv[])
{
int i;
float *u;
float *cuda_u0, *cuda_u1;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float ms = 0.0f;
int block_size;
int min_grid, grid;
//http://devblogs.nvidia.com/parallelforall/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/
hipOccupancyMaxPotentialBlockSize(&min_grid, &block_size, MyKernel, 0, NXPROB*NYPROB);
grid = (NXPROB*NYPROB + block_size - 1) / block_size;
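  // hipOccupancyMaxPotentialBlockSize suggests a block size that maximises
  // occupancy (here queried for MyKernel but reused for cuda_update); the grid
  // is then sized so grid * block_size covers all NXPROB*NYPROB points, one
  // thread per grid point.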
dim3 dimBlocks(BLOCK_H, BLOCK_V);
dim3 dimThreads((NXPROB / BLOCK_H) + ((NXPROB % BLOCK_H) != 0), (NYPROB / BLOCK_V) + ((NYPROB % BLOCK_V) != 0));
//malloc host
u = (float*)malloc(NXPROB*NYPROB*sizeof(float));
//malloc device
hipMalloc((void**)&cuda_u0, (NXPROB*NYPROB*sizeof(float)));
hipMalloc((void**)&cuda_u1, (NXPROB*NYPROB*sizeof(float)));
printf("Grid size: X= %d Y= %d Time steps= %d\n",NXPROB,NYPROB,STEPS);
inidat(NXPROB, NYPROB, u); //initialize
//prtdat(NXPROB, NYPROB, u, "initial.dat"); //print
//copy from host to device
hipMemcpy(cuda_u0, u, (NXPROB*NYPROB*sizeof(float)), hipMemcpyHostToDevice);
hipMemcpy(cuda_u1, u, (NXPROB*NYPROB*sizeof(float)), hipMemcpyHostToDevice);
hipEventRecord(start, 0);
for (i = 0; i < STEPS; i+=2)
{
hipLaunchKernelGGL(( cuda_update), dim3(grid), dim3(block_size), 0, 0, cuda_u0, cuda_u1, parms);
hipLaunchKernelGGL(( cuda_update), dim3(grid), dim3(block_size), 0, 0, cuda_u1, cuda_u0, parms);
}
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
//copy from device to host
hipMemcpy(u, cuda_u1, (NXPROB*NYPROB*sizeof(float)), hipMemcpyDeviceToHost);
//prtdat(NXPROB, NYPROB, u, "final.dat"); //print
printf("Time: %f ms\n", ms);
hipFree(cuda_u0);
hipFree(cuda_u1);
free(u);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| d527d6d0ef013fecbc2154e2c1e1fb1659f5138c.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define NXPROB 20480 /* x dimension of problem grid */
#define NYPROB 32768 /* y dimension of problem grid */
#define STEPS 500 /* number of time steps */
#define MAXWORKER 8 /* maximum number of worker tasks */
#define MINWORKER 3 /* minimum number of worker tasks */
#define BEGIN 1 /* message tag */
#define LTAG 2 /* message tag */
#define RTAG 3 /* message tag */
#define NONE 0 /* indicates no neighbor */
#define DONE 4 /* message tag */
#define MASTER 0 /* taskid of first process */
#define BLOCK_H 10
#define BLOCK_V 8
#define THREADS 32
struct Parms {
float cx;
float cy;
} parms = {0.1, 0.1};
/**************************************************************************
* subroutine update
****************************************************************************/
void update(int start, int end, int ny, float *u1, float *u2)
{
int ix, iy;
for (ix = start; ix <= end; ix++)
for (iy = 1; iy <= ny-2; iy++)
*(u2+ix*ny+iy) = *(u1+ix*ny+iy) +
parms.cx * (*(u1+(ix+1)*ny+iy) +
*(u1+(ix-1)*ny+iy) -
2.0 * *(u1+ix*ny+iy)) +
parms.cy * (*(u1+ix*ny+iy+1) +
*(u1+ix*ny+iy-1) -
2.0 * *(u1+ix*ny+iy));
}
/*****************************************************************************
* subroutine inidat
*****************************************************************************/
void inidat(int nx, int ny, float *u) {
int ix, iy;
for (ix = 0; ix <= nx-1; ix++)
for (iy = 0; iy <= ny-1; iy++)
{*(u+ix*ny+iy) = (float)(ix * (nx - ix - 1) * iy * (ny - iy - 1)%1000);
//if (*(u+ix*ny+iy) > 10000.0)
//printf("%f\n", *(u+ix*ny+iy));
}
}
/**************************************************************************
* subroutine prtdat
**************************************************************************/
void prtdat(int nx, int ny, float *u1, const char *fnam) {
int ix, iy;
FILE *fp;
fp = fopen(fnam, "w");
for (iy = ny-1; iy >= 0; iy--) {
for (ix = 0; ix <= nx-1; ix++) {
fprintf(fp, "%6.1f", *(u1+ix*ny+iy));
if (ix != nx-1)
fprintf(fp, " ");
else
fprintf(fp, "\n");
}
}
fclose(fp);
}
__global__ void cuda_update(float *u0, float *u1, struct Parms parms)
{
int ix, iy;
int gid = blockIdx.x * blockDim.x + threadIdx.x; /* 1-D launch: one thread per grid point */
ix = gid / NYPROB;
iy = gid % NYPROB;
if (ix > 0 && iy > 0)
{
if (ix < NXPROB - 1 && iy < NYPROB - 1)
{
*(u1+ix*NYPROB+iy) = *(u0+ix*NYPROB+iy) +
parms.cx * (*(u0+(ix+1)*NYPROB+iy) +
*(u0+(ix-1)*NYPROB+iy) -
2.0 * *(u0+ix*NYPROB+iy)) +
parms.cy * (*(u0+ix*NYPROB+iy+1) +
*(u0+ix*NYPROB+iy-1) -
2.0 * *(u0+ix*NYPROB+iy));
}
}
}
__global__ void MyKernel(int *a, int *b, int *c, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N)
{
c[idx] = a[idx] + b[idx];
}
}
int main (int argc, char *argv[])
{
int i;
float *u;
float *cuda_u0, *cuda_u1;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ms = 0.0f;
int block_size;
int min_grid, grid;
//http://devblogs.nvidia.com/parallelforall/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/
cudaOccupancyMaxPotentialBlockSize(&min_grid, &block_size, MyKernel, 0, NXPROB*NYPROB);
grid = (NXPROB*NYPROB + block_size - 1) / block_size;
dim3 dimBlocks(BLOCK_H, BLOCK_V);
dim3 dimThreads((NXPROB / BLOCK_H) + ((NXPROB % BLOCK_H) != 0), (NYPROB / BLOCK_V) + ((NYPROB % BLOCK_V) != 0));
//malloc host
u = (float*)malloc(NXPROB*NYPROB*sizeof(float));
//malloc device
cudaMalloc((void**)&cuda_u0, (NXPROB*NYPROB*sizeof(float)));
cudaMalloc((void**)&cuda_u1, (NXPROB*NYPROB*sizeof(float)));
printf("Grid size: X= %d Y= %d Time steps= %d\n",NXPROB,NYPROB,STEPS);
inidat(NXPROB, NYPROB, u); //initialize
//prtdat(NXPROB, NYPROB, u, "initial.dat"); //print
//copy from host to device
cudaMemcpy(cuda_u0, u, (NXPROB*NYPROB*sizeof(float)), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_u1, u, (NXPROB*NYPROB*sizeof(float)), cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
for (i = 0; i < STEPS; i+=2)
{
cuda_update<<<grid, block_size>>>(cuda_u0, cuda_u1, parms);
cuda_update<<<grid, block_size>>>(cuda_u1, cuda_u0, parms);
}
cudaThreadSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
//copy from device to host
cudaMemcpy(u, cuda_u1, (NXPROB*NYPROB*sizeof(float)), cudaMemcpyDeviceToHost);
//prtdat(NXPROB, NYPROB, u, "final.dat"); //print
printf("Time: %f ms\n", ms);
cudaFree(cuda_u0);
cudaFree(cuda_u1);
free(u);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
3c70475ef1038c9be0a06df3f735c5d25a5b932b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <stdlib.h>
#include <ctime>
#include <hip/hip_runtime.h>
#include <math_constants.h>
using namespace std;
__constant__ int ROWS;
/*check error*/
void checkError(hipError_t err, const char* message) {
if (err != hipSuccess)
{
fprintf(stderr, message, hipGetErrorString(err));
//exit(EXIT_FAILURE);
}
}
/*set value */
template<typename T>
void setElement(T* arr, int width, int row, int col, T value) {
arr[width * row + col] = value;
}
/*get value*/
template<typename T>
T getElement(T* arr, int width, int row, int col) {
return arr[row * width + col];
}
/*Fill an array 2D with values*/
template<typename T>
void initArray2D(T* arr, int rows, int cols, T value) {
int c = 0;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
setElement(arr, cols, i, j, rand() % 10);
}
}
}
/*show array 2D*/
template<typename T>
void displayArray2D(T* arr, int rows, int cols) {
for (int i = 0; i < rows; i++) {
printf("\n");
for (int j = 0; j < cols; j++) {
cout << getElement(arr, cols, i, j) << " ";
}
}
}
/*show array 1D*/
template<typename T>
void displayArray(T* arr, int size) {
for (int i = 0; i < size; i++) {
cout << arr[i] << endl;
}
}
/*rand float*/
float randomFloat(float min, float max) {
float random = ((float)rand()) / (float)RAND_MAX;
float diff = max - min;
float r = random * diff;
return min + r;
}
int computeGlobalWorkSize(int dataSize, int localWorkSize)
{
return (dataSize%localWorkSize) ? dataSize - dataSize%localWorkSize +
localWorkSize : dataSize;
}
__global__ void findMin(float *dst, const float *src, int size)
{
extern volatile __shared__ float cache[];
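	// Note: despite the kernel's name, this reduction computes the block-wise MAXIMUM: out-of-range
	// threads load -FLT_MAX (the identity for max) and pairs are combined with max(), which matches
	// the fmax()-based CPU check in main().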
int l_id = threadIdx.x;
int g_id = (blockDim.x * blockIdx.x) + l_id;
if (g_id < size)
cache[l_id] = src[g_id];
else
cache[l_id] = -FLT_MAX;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (l_id < s && l_id < size)
cache[l_id] = max(cache[l_id], cache[l_id + s]); // 2
__syncthreads();
}
if (l_id == 0)
dst[blockIdx.x] = cache[0];
}
int main(int argc, char **argv) {
int N = 100000;
float *data = new float[N];
size_t data_size = N * sizeof(float);
float min = 0, d_min = 0;
for (size_t i = 0; i < N; ++i) {
data[i] = randomFloat(0, 10);
//check cpu
min = fmax(min, data[i]);
}
float *dSrc, *dDst;
hipError_t err;
	err = hipMalloc(&dSrc, data_size);
	checkError(err, "dSrc allocation error: %s\n");
	err = hipMemcpy(dSrc, data, data_size, hipMemcpyHostToDevice);
	checkError(err, "dSrc copy error: %s\n");
	int threadsPerBlock = 256;
	int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
	cout << "number of blocks: " << blocksPerGrid << endl;
	// dDst holds one partial result per work-group, so it must be sized by the number of blocks
	err = hipMalloc(&dDst, blocksPerGrid*sizeof(float));
	checkError(err, "dDst allocation error: %s\n");
	err = hipMemcpyToSymbol(ROWS, &N, sizeof(int));
	checkError(err, "ROWS copy error: %s\n");
size_t local_work_size = 256;
size_t global_work_size = computeGlobalWorkSize(N, local_work_size);
int num_work_groups = global_work_size / local_work_size;
int step = 0;
while (num_work_groups > 0)
{
cout << "Step " << ++step << endl;
cout << "Size of data to be reduced: " << N << endl;
cout << "Local work size: " << local_work_size << endl;
cout << "Global work size: " << global_work_size << endl;
cout << "Num of work-groups: " << num_work_groups << endl << endl;
findMin << < num_work_groups, local_work_size, local_work_size*sizeof(float) >> >(dDst, dSrc, N);
err = hipDeviceSynchronize();
		checkError(err, "kernel synchronization error: %s\n");
if (num_work_groups > 1)
{
N = num_work_groups;
global_work_size = computeGlobalWorkSize(N, local_work_size);
num_work_groups = global_work_size / local_work_size;
			// swap the roles of the buffers so the next pass reads the partial results just produced
			float* tmp = dSrc;
			dSrc = dDst;
			dDst = tmp;
}
else
num_work_groups = 0;
err = hipMemcpy(&d_min, dDst, sizeof(d_min), hipMemcpyDeviceToHost);
printf("Parallel min: %g \n", d_min);
}
err = hipMemcpy(&d_min, dDst, sizeof(d_min), hipMemcpyDeviceToHost);
printf("Parallel min: GPU: %g vs CPU: %g\n", d_min, min);
hipFree(dSrc);
dSrc = NULL;
hipFree(dDst);
dDst = NULL;
	delete[] data; // data was allocated with new float[N]
system("pause");
} | 3c70475ef1038c9be0a06df3f735c5d25a5b932b.cu | #include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <stdlib.h>
#include <ctime>
#include <cuda_runtime.h>
#include <math_constants.h>
#include <cfloat>	// FLT_MAX
using namespace std;
__constant__ int ROWS;
/*check error*/
void checkError(cudaError_t err, const char* message) {
if (err != cudaSuccess)
{
fprintf(stderr, message, cudaGetErrorString(err));
//exit(EXIT_FAILURE);
}
}
/*set value */
template<typename T>
void setElement(T* arr, int width, int row, int col, T value) {
arr[width * row + col] = value;
}
/*get value*/
template<typename T>
T getElement(T* arr, int width, int row, int col) {
return arr[row * width + col];
}
/*Fill an array 2D with values*/
template<typename T>
void initArray2D(T* arr, int rows, int cols, T value) {
int c = 0;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
setElement(arr, cols, i, j, rand() % 10);
}
}
}
/*show array 2D*/
template<typename T>
void displayArray2D(T* arr, int rows, int cols) {
for (int i = 0; i < rows; i++) {
printf("\n");
for (int j = 0; j < cols; j++) {
cout << getElement(arr, cols, i, j) << " ";
}
}
}
/*show array 1D*/
template<typename T>
void displayArray(T* arr, int size) {
for (int i = 0; i < size; i++) {
cout << arr[i] << endl;
}
}
/*rand float*/
float randomFloat(float min, float max) {
float random = ((float)rand()) / (float)RAND_MAX;
float diff = max - min;
float r = random * diff;
return min + r;
}
int computeGlobalWorkSize(int dataSize, int localWorkSize)
{
return (dataSize%localWorkSize) ? dataSize - dataSize%localWorkSize +
localWorkSize : dataSize;
}
__global__ void findMin(float *dst, const float *src, int size)
{
extern volatile __shared__ float cache[];
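	// Note: despite the kernel's name, this reduction computes the block-wise MAXIMUM: out-of-range
	// threads load -FLT_MAX (the identity for max) and pairs are combined with max(), which matches
	// the fmax()-based CPU check in main().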
int l_id = threadIdx.x;
int g_id = (blockDim.x * blockIdx.x) + l_id;
if (g_id < size)
cache[l_id] = src[g_id];
else
cache[l_id] = -FLT_MAX;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (l_id < s && l_id < size)
cache[l_id] = max(cache[l_id], cache[l_id + s]); // 2
__syncthreads();
}
if (l_id == 0)
dst[blockIdx.x] = cache[0];
}
int main(int argc, char **argv) {
int N = 100000;
float *data = new float[N];
size_t data_size = N * sizeof(float);
float min = 0, d_min = 0;
for (size_t i = 0; i < N; ++i) {
data[i] = randomFloat(0, 10);
//check cpu
min = fmax(min, data[i]);
}
float *dSrc, *dDst;
cudaError_t err;
	err = cudaMalloc(&dSrc, data_size);
	checkError(err, "dSrc allocation error: %s\n");
	err = cudaMemcpy(dSrc, data, data_size, cudaMemcpyHostToDevice);
	checkError(err, "dSrc copy error: %s\n");
	int threadsPerBlock = 256;
	int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
	cout << "number of blocks: " << blocksPerGrid << endl;
	// dDst holds one partial result per work-group, so it must be sized by the number of blocks
	err = cudaMalloc(&dDst, blocksPerGrid*sizeof(float));
	checkError(err, "dDst allocation error: %s\n");
	err = cudaMemcpyToSymbol(ROWS, &N, sizeof(int));
	checkError(err, "ROWS copy error: %s\n");
size_t local_work_size = 256;
size_t global_work_size = computeGlobalWorkSize(N, local_work_size);
int num_work_groups = global_work_size / local_work_size;
int step = 0;
while (num_work_groups > 0)
{
cout << "Step " << ++step << endl;
cout << "Size of data to be reduced: " << N << endl;
cout << "Local work size: " << local_work_size << endl;
cout << "Global work size: " << global_work_size << endl;
cout << "Num of work-groups: " << num_work_groups << endl << endl;
findMin << < num_work_groups, local_work_size, local_work_size*sizeof(float) >> >(dDst, dSrc, N);
err = cudaDeviceSynchronize();
		checkError(err, "kernel synchronization error: %s\n");
if (num_work_groups > 1)
{
N = num_work_groups;
global_work_size = computeGlobalWorkSize(N, local_work_size);
num_work_groups = global_work_size / local_work_size;
			// swap the roles of the buffers so the next pass reads the partial results just produced
			float* tmp = dSrc;
			dSrc = dDst;
			dDst = tmp;
}
else
num_work_groups = 0;
err = cudaMemcpy(&d_min, dDst, sizeof(d_min), cudaMemcpyDeviceToHost);
printf("Parallel min: %g \n", d_min);
}
err = cudaMemcpy(&d_min, dDst, sizeof(d_min), cudaMemcpyDeviceToHost);
printf("Parallel min: GPU: %g vs CPU: %g\n", d_min, min);
cudaFree(dSrc);
dSrc = NULL;
cudaFree(dDst);
dDst = NULL;
	delete[] data; // data was allocated with new float[N]
system("pause");
} |
ac25adda2e6be75fc59a447fd22159b991a953fc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <malloc.h>
#include <iostream>
#include <string>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. Only a single thread block is used in this small case, for simplicity.
* sumArraysOnHost sequentially iterates through vector elements on the host.
* This version of sumArrays adds host timers to measure GPU and CPU
* performance.
*/
#define CHECK(call) \
do { \
	const hipError_t check_err_ = (call); /* evaluate the wrapped call only once */ \
	if (hipSuccess != check_err_) { \
		fprintf(stderr, ("CUDA ERROR! file: %s[%i] -> %s\n"), __FILE__, __LINE__, hipGetErrorString(check_err_)); \
		exit(0); \
	} \
} while (0)
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i],
gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void initialData(float *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned)time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
for (int idx = 0; idx < N; idx++)
{
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i] + B[i];
}
__global__ void sumArraysOnGPUDivergence(float *A, float *B, float *C, const int N)
{
// Can you draw the execution time of a warp?
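	// Explanation: the threads of a warp share one instruction stream, so when only thread 0 of each
	// warp takes the extra branch below, the remaining threads idle until it finishes; the divergent
	// paths execute serially, which is the slowdown this kernel illustrates.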
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
if (threadIdx.x == 0) {
			int a = 0;
for (int j = 0; j < 10; j++) a += j;
}
C[i] = A[i] + B[i];
}
}
__global__ void sumArraysOnGPUOffset(float *A, float *B, float *C, const int N)
{
// What is the implications on the memory fetching process?
// /!\ the sum of the vectors is not the right one. This kernel is here for the example
// /!\ However the test will not fail if you don't change the offsets as C is already filled with right values
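	// Explanation: reading A[j]/B[j] and writing C[i + offset2] shifts every warp's accesses away from
	// the naturally aligned 128-byte segments, so each warp-wide request touches extra memory segments
	// and effective global-memory throughput drops compared with the aligned kernel above.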
const int offset = 10;
const int offset2 = 5;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = i + offset;
if (j < N) C[i + offset2] = A[j] + B[j];
}
__global__ void sumArraysOnGPUNoCoalescence(float *A, float *B, float *C, const int N)
{
// Which thread is fetching which memory address?
// /!\ the sum of the vectors is not the right one. This kernel is here for the example
// /!\ However the test will not fail if you don't change the offsets as C is already filled with right values
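	// Explanation: with i = threadIdx.x * blockDim.x + blockIdx.x, consecutive threads of a warp access
	// elements that are blockDim.x floats apart instead of adjacent ones, so one warp-wide load is spread
	// over many memory segments (strided, non-coalesced access) instead of a single coalesced transaction.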
//int i = blockIdx.x * blockDim.x + threadIdx.x;
int i = threadIdx.x * blockDim.x + blockIdx.x;
if (i < N) C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of vectors
int nElem = 1 << 10;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
sumArraysOnHost(h_A, h_B, hostRef, nElem);
// malloc device global memory
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((float**)&d_A, nBytes));
CHECK(hipMalloc((float**)&d_B, nBytes));
CHECK(hipMalloc((float**)&d_C, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_C, gpuRef, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int iLen = 512;
dim3 blockDim(iLen);
dim3 gridDim((nElem + blockDim.x - 1) / blockDim.x);
// --------KERNEL coalesced sum
sumArraysOnGPU << <gridDim, blockDim >> >(d_A, d_B, d_C, nElem);
CHECK(hipDeviceSynchronize());
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// --------KERNEL divergent sum
sumArraysOnGPUDivergence << <gridDim, blockDim >> >(d_A, d_B, d_C, nElem);
CHECK(hipDeviceSynchronize());
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// --------KERNEL coalesced sum with offset
sumArraysOnGPUOffset << <gridDim, blockDim >> >(d_A, d_B, d_C, nElem);
CHECK(hipDeviceSynchronize());
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// --------KERNEL non-coalesced sum
sumArraysOnGPUNoCoalescence << <gridDim, blockDim >> >(d_A, d_B, d_C, nElem);
CHECK(hipDeviceSynchronize());
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return(0);
}
| ac25adda2e6be75fc59a447fd22159b991a953fc.cu | #include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <malloc.h>
#include <iostream>
#include <string>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. Only a single thread block is used in this small case, for simplicity.
* sumArraysOnHost sequentially iterates through vector elements on the host.
* This version of sumArrays adds host timers to measure GPU and CPU
* performance.
*/
#define CHECK(call) \
do { \
	const cudaError_t check_err_ = (call); /* evaluate the wrapped call only once */ \
	if (cudaSuccess != check_err_) { \
		fprintf(stderr, ("CUDA ERROR! file: %s[%i] -> %s\n"), __FILE__, __LINE__, cudaGetErrorString(check_err_)); \
		exit(0); \
	} \
} while (0)
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i],
gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void initialData(float *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned)time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
for (int idx = 0; idx < N; idx++)
{
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i] + B[i];
}
__global__ void sumArraysOnGPUDivergence(float *A, float *B, float *C, const int N)
{
// Can you draw the execution time of a warp?
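	// Explanation: the threads of a warp share one instruction stream, so when only thread 0 of each
	// warp takes the extra branch below, the remaining threads idle until it finishes; the divergent
	// paths execute serially, which is the slowdown this kernel illustrates.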
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
if (threadIdx.x == 0) {
			int a = 0;
for (int j = 0; j < 10; j++) a += j;
}
C[i] = A[i] + B[i];
}
}
__global__ void sumArraysOnGPUOffset(float *A, float *B, float *C, const int N)
{
// What is the implications on the memory fetching process?
// /!\ the sum of the vectors is not the right one. This kernel is here for the example
// /!\ However the test will not fail if you don't change the offsets as C is already filled with right values
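	// Explanation: reading A[j]/B[j] and writing C[i + offset2] shifts every warp's accesses away from
	// the naturally aligned 128-byte segments, so each warp-wide request touches extra memory segments
	// and effective global-memory throughput drops compared with the aligned kernel above.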
const int offset = 10;
const int offset2 = 5;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = i + offset;
if (j < N) C[i + offset2] = A[j] + B[j];
}
__global__ void sumArraysOnGPUNoCoalescence(float *A, float *B, float *C, const int N)
{
// Which thread is fetching which memory address?
// /!\ the sum of the vectors is not the right one. This kernel is here for the example
// /!\ However the test will not fail if you don't change the offsets as C is already filled with right values
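	// Explanation: with i = threadIdx.x * blockDim.x + blockIdx.x, consecutive threads of a warp access
	// elements that are blockDim.x floats apart instead of adjacent ones, so one warp-wide load is spread
	// over many memory segments (strided, non-coalesced access) instead of a single coalesced transaction.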
//int i = blockIdx.x * blockDim.x + threadIdx.x;
int i = threadIdx.x * blockDim.x + blockIdx.x;
if (i < N) C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of vectors
int nElem = 1 << 10;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
sumArraysOnHost(h_A, h_B, hostRef, nElem);
// malloc device global memory
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((float**)&d_A, nBytes));
CHECK(cudaMalloc((float**)&d_B, nBytes));
CHECK(cudaMalloc((float**)&d_C, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int iLen = 512;
dim3 blockDim(iLen);
dim3 gridDim((nElem + blockDim.x - 1) / blockDim.x);
// --------KERNEL coalesced sum
sumArraysOnGPU << <gridDim, blockDim >> >(d_A, d_B, d_C, nElem);
CHECK(cudaDeviceSynchronize());
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// --------KERNEL divergent sum
sumArraysOnGPUDivergence << <gridDim, blockDim >> >(d_A, d_B, d_C, nElem);
CHECK(cudaDeviceSynchronize());
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// --------KERNEL coalesced sum with offset
sumArraysOnGPUOffset << <gridDim, blockDim >> >(d_A, d_B, d_C, nElem);
CHECK(cudaDeviceSynchronize());
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// --------KERNEL non-coalesced sum
sumArraysOnGPUNoCoalescence << <gridDim, blockDim >> >(d_A, d_B, d_C, nElem);
CHECK(cudaDeviceSynchronize());
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return(0);
}
|
f1a95e9974ffc837739172738052c57dbae74b0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// CUDA Kernel function to add the elements of two arrays on the GPU
__global__
void add(int n, float* x, float* y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1 << 20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
// Wait for the GPU to finish before accessing on CPU
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
// Run program: Ctrl + F5 or Debug > Start Without Debugging menu
// Debug program: F5 or Debug > Start Debugging menu
// Tips for Getting Started:
// 1. Use the Solution Explorer window to add/manage files
// 2. Use the Team Explorer window to connect to source control
// 3. Use the Output window to see build output and other messages
// 4. Use the Error List window to view errors
// 5. Go to Project > Add New Item to create new code files, or Project > Add Existing Item to add existing code files to the project
// 6. In the future, to open this project again, go to File > Open > Project and select the .sln file
| f1a95e9974ffc837739172738052c57dbae74b0b.cu | #include <iostream>
#include <math.h>
// CUDA Kernel function to add the elements of two arrays on the GPU
__global__
void add(int n, float* x, float* y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1 << 20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
add<<<1, 1>>>(N, x, y);
// Wait for the GPU to finish before accessing on CPU
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
// Run program: Ctrl + F5 or Debug > Start Without Debugging menu
// Debug program: F5 or Debug > Start Debugging menu
// Tips for Getting Started:
// 1. Use the Solution Explorer window to add/manage files
// 2. Use the Team Explorer window to connect to source control
// 3. Use the Output window to see build output and other messages
// 4. Use the Error List window to view errors
// 5. Go to Project > Add New Item to create new code files, or Project > Add Existing Item to add existing code files to the project
// 6. In the future, to open this project again, go to File > Open > Project and select the .sln file
|
dd5f2a6418c76d90e51a2b96eb90fd7235006253.hip | // !!! This is a file automatically generated by hipify!!!
// Andrew Gloster
// May 2018
// Copyright 2018 Andrew Gloster
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*! \file custenCreateDestroy2DXYp.cu
Functions to create and destroy the cuSten_t that is used to give input to the compute kernels.
2D xy direction, periodic
*/
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
#include <iostream>
// ---------------------------------------------------------------------
// Custom libraries and headers
// ---------------------------------------------------------------------
#include "cuSten_struct_type.h"
#include "cuSten_struct_functions.h"
#include "../util/util.h"
// ---------------------------------------------------------------------
// Function to create the struct
// ---------------------------------------------------------------------
/*! \fn void cuStenCreate2DXYp
	\brief Function to set up cuSten_t
	\param pt_cuSten Pointer to cuSten type provided by user
	\param deviceNum Device number on which the computation runs
	\param numTiles Number of tiles to divide the data into
	\param nx Total number of points in the x direction
	\param ny Total number of points in the y direction
	\param BLOCK_X Size of thread block in the x direction
	\param BLOCK_Y Size of thread block in the y direction
	\param dataNew Pointer to the data output by the function
	\param dataOld Pointer to the data input to the function
	\param weights Pointer to input weights for each point in the stencil
	\param numStenHoriz Total number of points in the stencil in the x direction
	\param numStenLeft Number of points on the left side of the stencil
	\param numStenRight Number of points on the right side of the stencil
	\param numStenVert Total number of points in the stencil in the y direction
	\param numStenTop Number of points on the top of the stencil
	\param numStenBottom Number of points on the bottom of the stencil
*/
void cuStenCreate2DXYp(
cuSten_t* pt_cuSten,
int deviceNum,
int numTiles,
int nx,
int ny,
int BLOCK_X,
int BLOCK_Y,
double* dataNew,
double* dataOld,
double* weights,
int numStenHoriz,
int numStenLeft,
int numStenRight,
int numStenVert,
int numStenTop,
int numStenBottom
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set the device number associated with the struct
pt_cuSten->deviceNum = deviceNum;
// Set the number of streams
pt_cuSten->numStreams = 3;
// Set the number of tiles
pt_cuSten->numTiles = numTiles;
// Set the number points in x on the device
pt_cuSten->nx = nx;
// Set the number points in y on the device
pt_cuSten->ny = ny;
// Number of threads in x on the device
pt_cuSten->BLOCK_X = BLOCK_X;
// Number of threads in y on the device
pt_cuSten->BLOCK_Y = BLOCK_Y;
// Set current active compute device
hipSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
	// Create memory for the streams
	pt_cuSten->streams = (hipStream_t*)malloc(pt_cuSten->numStreams * sizeof(hipStream_t));
// Create the streams
for (int st = 0; st < pt_cuSten->numStreams; st++)
{
hipStreamCreate(&pt_cuSten->streams[st]);
sprintf(msgStringBuffer, "Creating stream %d on GPU %d", st, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
	// Create memory for the events
	pt_cuSten->events = (hipEvent_t*)malloc(2 * sizeof(hipEvent_t));
// Create the events
for (int ev = 0; ev < 2; ev++)
{
hipEventCreate(&pt_cuSten->events[ev]);
sprintf(msgStringBuffer, "Creating event %d on GPU %d", ev, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Set number of points in the stencil
pt_cuSten->numSten = numStenHoriz * numStenVert;
// Set number of points to the left in the stencil
pt_cuSten->numStenLeft = numStenLeft;
// Set number of points to the right in the stencil
pt_cuSten->numStenRight = numStenRight;
// Set number of points in the top the stencil
pt_cuSten->numStenTop = numStenTop;
// Set number of points in the bottom of the stencil
pt_cuSten->numStenBottom = numStenBottom;
// Set local block array sizes - x direction
pt_cuSten->nxLocal = pt_cuSten->BLOCK_X + pt_cuSten->numStenLeft + pt_cuSten->numStenRight;
	// Set local block array sizes - y direction
pt_cuSten->nyLocal = pt_cuSten->BLOCK_Y + pt_cuSten->numStenTop + pt_cuSten->numStenBottom;
// Set the amount of shared memory required
pt_cuSten->mem_shared = (pt_cuSten->nxLocal * pt_cuSten->nyLocal) * sizeof(double) + pt_cuSten->numSten * sizeof(double);
// Find number of points per tile
pt_cuSten->nyTile = pt_cuSten->ny / pt_cuSten->numTiles;
// Set the grid up
pt_cuSten->xGrid = (pt_cuSten->nx % pt_cuSten->BLOCK_X == 0) ? (pt_cuSten->nx / pt_cuSten->BLOCK_X) : (pt_cuSten->nx / pt_cuSten->BLOCK_X + 1);
pt_cuSten->yGrid = (pt_cuSten->nyTile % pt_cuSten->BLOCK_Y == 0) ? (pt_cuSten->nyTile / pt_cuSten->BLOCK_Y) : (pt_cuSten->nyTile / pt_cuSten->BLOCK_Y + 1);
// Set the device weights pointer
pt_cuSten->weights = weights;
	// Allocate the pointers for each input tile
	pt_cuSten->dataInput = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
	// Allocate the pointers for each output tile
	pt_cuSten->dataOutput = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
	// Tile offset index
	int offset = pt_cuSten->nx * pt_cuSten->nyTile;
	// Match the pointers to the data
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Set the input data
pt_cuSten->dataInput[tile] = &dataOld[tile * offset];
// Set the output data
pt_cuSten->dataOutput[tile] = &dataNew[tile * offset];
}
// Create cases depending on what tile numbers - Periodic
// 1 tile
// 2 tiles
// 3 or greater
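	// For the periodic case the boundary (halo) rows wrap around the domain: the top halo of the first
	// tile comes from the last numStenTop rows of the full array, the bottom halo of the last tile from
	// row 0, and interior tiles read their halos from the neighbouring tiles, as set up below.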
	// Allocate top boundary memory
	pt_cuSten->boundaryTop = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
	// Allocate bottom boundary memory
	pt_cuSten->boundaryBottom = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
switch(pt_cuSten->numTiles)
{
// One tile only requires single top and bottom to be set
case 1:
pt_cuSten->boundaryTop[0] = &dataOld[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataOld[0];
break;
// Two tiles requires a special case of only setting two tiles
case 2:
pt_cuSten->boundaryTop[0] = &dataOld[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataOld[pt_cuSten->nyTile * pt_cuSten->nx];
pt_cuSten->boundaryTop[1] = &dataOld[(pt_cuSten->nyTile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[1] = &dataOld[0];
break;
// Default case has interiors, so set the top tile, then loop over interior, then set the bottom tile
default:
pt_cuSten->boundaryTop[0] = &dataOld[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataOld[pt_cuSten->nyTile * pt_cuSten->nx];
for (int tile = 1; tile < pt_cuSten->numTiles - 1; tile++)
{
pt_cuSten->boundaryTop[tile] = &dataOld[(pt_cuSten->nyTile * tile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[tile] = &dataOld[(pt_cuSten->nyTile * (tile + 1)) * pt_cuSten->nx];
}
pt_cuSten->boundaryTop[pt_cuSten->numTiles - 1] = &dataOld[(pt_cuSten->nyTile * (pt_cuSten->numTiles - 1) - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[pt_cuSten->numTiles - 1] = &dataOld[0];
break;
}
// Number of points in top boundary data
pt_cuSten->numBoundaryTop = pt_cuSten->numStenTop * pt_cuSten->nx;
// Number of points in bottom boundary data
pt_cuSten->numBoundaryBottom = pt_cuSten->numStenBottom * pt_cuSten->nx;
// Number of points in a horizontal stencil
pt_cuSten->numStenHoriz = numStenHoriz;
// Number of points in a vertical stencil
pt_cuSten->numStenVert = numStenVert;
}
// ---------------------------------------------------------------------
// Swap pointers
// ---------------------------------------------------------------------
/*! \fn void cuStenSwap2DXYp
	\brief Function to swap pointers necessary for timestepping
	\param pt_cuSten Pointer to cuSten type provided by user
	\param dataInput Pointer to the data to be used as input on the next compute step
*/
void cuStenSwap2DXYp(
cuSten_t* pt_cuSten,
double* dataInput
)
{
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Swap the input and output data
std::swap(pt_cuSten->dataInput[tile], pt_cuSten->dataOutput[tile]);
// Update the boundary data
switch(pt_cuSten->numTiles)
{
// One tile only requires single top and bottom to be set
case 1:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[0];
break;
// Two tiles requires a special case of only setting two tiles
case 2:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
pt_cuSten->boundaryTop[1] = &dataInput[(pt_cuSten->nyTile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[1] = &dataInput[0];
break;
// Default case has interiors, so set the top tile, then loop over interior, then set the bottom tile
default:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
for (int tile = 1; tile < pt_cuSten->numTiles - 1; tile++)
{
pt_cuSten->boundaryTop[tile] = &dataInput[(pt_cuSten->nyTile * tile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[tile] = &dataInput[(pt_cuSten->nyTile * (tile + 1)) * pt_cuSten->nx];
}
pt_cuSten->boundaryTop[pt_cuSten->numTiles - 1] = &dataInput[(pt_cuSten->nyTile * (pt_cuSten->numTiles - 1) - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[pt_cuSten->numTiles - 1] = &dataInput[0];
break;
}
}
}
// ---------------------------------------------------------------------
// Function to destroy the struct
// ---------------------------------------------------------------------
/*! \fn void cuStenDestroy2DXYp
\brief Function to destroy data associated with cuSten_t
\param pt_cuSten Pointer to cuSten type provided by user
*/
void cuStenDestroy2DXYp(
cuSten_t* pt_cuSten
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set current active compute device
hipSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
// Destroy the streams
for (int st = 0; st < pt_cuSten->numStreams; st++)
{
hipStreamDestroy(pt_cuSten->streams[st]);
sprintf(msgStringBuffer, "Destroying stream %d on GPU %d", st, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Free the main memory
free(pt_cuSten->streams);
	// Destroy the events
for (int ev = 0; ev < 2; ev++)
{
hipEventDestroy(pt_cuSten->events[ev]);
sprintf(msgStringBuffer, "Destroying event %d on GPU %d", ev, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Free the main memory
free(pt_cuSten->events);
// Free the pointers for each input tile
free(pt_cuSten->dataInput);
// Free the pointers for each output tile
free(pt_cuSten->dataOutput);
// Free the top boundary tile pointers
free(pt_cuSten->boundaryTop);
// Free the bottom boundary tile pointers
free(pt_cuSten->boundaryBottom);
}
// ---------------------------------------------------------------------
// End of file
// --------------------------------------------------------------------- | dd5f2a6418c76d90e51a2b96eb90fd7235006253.cu | // Andrew Gloster
// May 2018
// Copyright 2018 Andrew Gloster
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*! \file custenCreateDestroy2DXYp.cu
Functions to create and destroy the cuSten_t that is used to give input to the compute kernels.
2D xy direction, periodic
*/
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
#include <iostream>
// ---------------------------------------------------------------------
// Custom libraries and headers
// ---------------------------------------------------------------------
#include "cuSten_struct_type.h"
#include "cuSten_struct_functions.h"
#include "../util/util.h"
// ---------------------------------------------------------------------
// Function to create the struct
// ---------------------------------------------------------------------
/*! \fn void cuStenCreate2DXYp
	\brief Function to set up cuSten_t
	\param pt_cuSten Pointer to cuSten type provided by user
	\param deviceNum Device number on which the computation runs
	\param numTiles Number of tiles to divide the data into
	\param nx Total number of points in the x direction
	\param ny Total number of points in the y direction
	\param BLOCK_X Size of thread block in the x direction
	\param BLOCK_Y Size of thread block in the y direction
	\param dataNew Pointer to the data output by the function
	\param dataOld Pointer to the data input to the function
	\param weights Pointer to input weights for each point in the stencil
	\param numStenHoriz Total number of points in the stencil in the x direction
	\param numStenLeft Number of points on the left side of the stencil
	\param numStenRight Number of points on the right side of the stencil
	\param numStenVert Total number of points in the stencil in the y direction
	\param numStenTop Number of points on the top of the stencil
	\param numStenBottom Number of points on the bottom of the stencil
*/
void cuStenCreate2DXYp(
cuSten_t* pt_cuSten,
int deviceNum,
int numTiles,
int nx,
int ny,
int BLOCK_X,
int BLOCK_Y,
double* dataNew,
double* dataOld,
double* weights,
int numStenHoriz,
int numStenLeft,
int numStenRight,
int numStenVert,
int numStenTop,
int numStenBottom
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set the device number associated with the struct
pt_cuSten->deviceNum = deviceNum;
// Set the number of streams
pt_cuSten->numStreams = 3;
// Set the number of tiles
pt_cuSten->numTiles = numTiles;
// Set the number points in x on the device
pt_cuSten->nx = nx;
// Set the number points in y on the device
pt_cuSten->ny = ny;
// Number of threads in x on the device
pt_cuSten->BLOCK_X = BLOCK_X;
// Number of threads in y on the device
pt_cuSten->BLOCK_Y = BLOCK_Y;
// Set current active compute device
cudaSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
	// Create memory for the streams
	pt_cuSten->streams = (cudaStream_t*)malloc(pt_cuSten->numStreams * sizeof(cudaStream_t));
// Create the streams
for (int st = 0; st < pt_cuSten->numStreams; st++)
{
cudaStreamCreate(&pt_cuSten->streams[st]);
sprintf(msgStringBuffer, "Creating stream %d on GPU %d", st, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
	// Create memory for the events
	pt_cuSten->events = (cudaEvent_t*)malloc(2 * sizeof(cudaEvent_t));
// Create the events
for (int ev = 0; ev < 2; ev++)
{
cudaEventCreate(&pt_cuSten->events[ev]);
sprintf(msgStringBuffer, "Creating event %d on GPU %d", ev, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Set number of points in the stencil
pt_cuSten->numSten = numStenHoriz * numStenVert;
// Set number of points to the left in the stencil
pt_cuSten->numStenLeft = numStenLeft;
// Set number of points to the right in the stencil
pt_cuSten->numStenRight = numStenRight;
// Set number of points in the top the stencil
pt_cuSten->numStenTop = numStenTop;
// Set number of points in the bottom of the stencil
pt_cuSten->numStenBottom = numStenBottom;
// Set local block array sizes - x direction
pt_cuSten->nxLocal = pt_cuSten->BLOCK_X + pt_cuSten->numStenLeft + pt_cuSten->numStenRight;
	// Set local block array sizes - y direction
pt_cuSten->nyLocal = pt_cuSten->BLOCK_Y + pt_cuSten->numStenTop + pt_cuSten->numStenBottom;
// Set the amount of shared memory required
pt_cuSten->mem_shared = (pt_cuSten->nxLocal * pt_cuSten->nyLocal) * sizeof(double) + pt_cuSten->numSten * sizeof(double);
// Find number of points per tile
pt_cuSten->nyTile = pt_cuSten->ny / pt_cuSten->numTiles;
// Set the grid up
pt_cuSten->xGrid = (pt_cuSten->nx % pt_cuSten->BLOCK_X == 0) ? (pt_cuSten->nx / pt_cuSten->BLOCK_X) : (pt_cuSten->nx / pt_cuSten->BLOCK_X + 1);
pt_cuSten->yGrid = (pt_cuSten->nyTile % pt_cuSten->BLOCK_Y == 0) ? (pt_cuSten->nyTile / pt_cuSten->BLOCK_Y) : (pt_cuSten->nyTile / pt_cuSten->BLOCK_Y + 1);
// Set the device weights pointer
pt_cuSten->weights = weights;
	// Allocate the pointers for each input tile
	pt_cuSten->dataInput = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
	// Allocate the pointers for each output tile
	pt_cuSten->dataOutput = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
	// Tile offset index
	int offset = pt_cuSten->nx * pt_cuSten->nyTile;
	// Match the pointers to the data
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Set the input data
pt_cuSten->dataInput[tile] = &dataOld[tile * offset];
// Set the output data
pt_cuSten->dataOutput[tile] = &dataNew[tile * offset];
}
// Create cases depending on what tile numbers - Periodic
// 1 tile
// 2 tiles
// 3 or greater
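	// For the periodic case the boundary (halo) rows wrap around the domain: the top halo of the first
	// tile comes from the last numStenTop rows of the full array, the bottom halo of the last tile from
	// row 0, and interior tiles read their halos from the neighbouring tiles, as set up below.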
	// Allocate top boundary memory
	pt_cuSten->boundaryTop = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
	// Allocate bottom boundary memory
	pt_cuSten->boundaryBottom = (double**)malloc(pt_cuSten->numTiles * sizeof(double*));
switch(pt_cuSten->numTiles)
{
// One tile only requires single top and bottom to be set
case 1:
pt_cuSten->boundaryTop[0] = &dataOld[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataOld[0];
break;
// Two tiles requires a special case of only setting two tiles
case 2:
pt_cuSten->boundaryTop[0] = &dataOld[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataOld[pt_cuSten->nyTile * pt_cuSten->nx];
pt_cuSten->boundaryTop[1] = &dataOld[(pt_cuSten->nyTile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[1] = &dataOld[0];
break;
// Default case has interiors, so set the top tile, then loop over interior, then set the bottom tile
default:
pt_cuSten->boundaryTop[0] = &dataOld[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataOld[pt_cuSten->nyTile * pt_cuSten->nx];
for (int tile = 1; tile < pt_cuSten->numTiles - 1; tile++)
{
pt_cuSten->boundaryTop[tile] = &dataOld[(pt_cuSten->nyTile * tile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[tile] = &dataOld[(pt_cuSten->nyTile * (tile + 1)) * pt_cuSten->nx];
}
pt_cuSten->boundaryTop[pt_cuSten->numTiles - 1] = &dataOld[(pt_cuSten->nyTile * (pt_cuSten->numTiles - 1) - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[pt_cuSten->numTiles - 1] = &dataOld[0];
break;
}
// Number of points in top boundary data
pt_cuSten->numBoundaryTop = pt_cuSten->numStenTop * pt_cuSten->nx;
// Number of points in bottom boundary data
pt_cuSten->numBoundaryBottom = pt_cuSten->numStenBottom * pt_cuSten->nx;
// Number of points in a horizontal stencil
pt_cuSten->numStenHoriz = numStenHoriz;
// Number of points in a vertical stencil
pt_cuSten->numStenVert = numStenVert;
}
// ---------------------------------------------------------------------
// Swap pointers
// ---------------------------------------------------------------------
/*! \fn void cuStenSwap2DXYp
	\brief Function to swap pointers necessary for timestepping
	\param pt_cuSten Pointer to cuSten type provided by user
	\param dataInput Pointer to the data to be used as input on the next compute step
*/
void cuStenSwap2DXYp(
cuSten_t* pt_cuSten,
double* dataInput
)
{
for (int tile = 0; tile < pt_cuSten->numTiles; tile++)
{
// Swap the input and output data
std::swap(pt_cuSten->dataInput[tile], pt_cuSten->dataOutput[tile]);
// Update the boundary data
switch(pt_cuSten->numTiles)
{
// One tile only requires single top and bottom to be set
case 1:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[0];
break;
// Two tiles requires a special case of only setting two tiles
case 2:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
pt_cuSten->boundaryTop[1] = &dataInput[(pt_cuSten->nyTile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[1] = &dataInput[0];
break;
// Default case has interiors, so set the top tile, then loop over interior, then set the bottom tile
default:
pt_cuSten->boundaryTop[0] = &dataInput[(pt_cuSten->ny - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[0] = &dataInput[pt_cuSten->nyTile * pt_cuSten->nx];
for (int tile = 1; tile < pt_cuSten->numTiles - 1; tile++)
{
pt_cuSten->boundaryTop[tile] = &dataInput[(pt_cuSten->nyTile * tile - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[tile] = &dataInput[(pt_cuSten->nyTile * (tile + 1)) * pt_cuSten->nx];
}
pt_cuSten->boundaryTop[pt_cuSten->numTiles - 1] = &dataInput[(pt_cuSten->nyTile * (pt_cuSten->numTiles - 1) - pt_cuSten->numStenTop) * pt_cuSten->nx];
pt_cuSten->boundaryBottom[pt_cuSten->numTiles - 1] = &dataInput[0];
break;
}
}
}
// ---------------------------------------------------------------------
// Function to destroy the struct
// ---------------------------------------------------------------------
/*! \fn void cuStenDestroy2DXYp
\brief Function to destroy data associated with cuSten_t
\param pt_cuSten Pointer to cuSten type provided by user
*/
void cuStenDestroy2DXYp(
cuSten_t* pt_cuSten
)
{
// Buffer used for error checking
char msgStringBuffer[1024];
// Set current active compute device
cudaSetDevice(pt_cuSten->deviceNum);
sprintf(msgStringBuffer, "Setting current device to GPU %d", pt_cuSten->deviceNum);
checkError(msgStringBuffer);
// Destroy the streams
for (int st = 0; st < pt_cuSten->numStreams; st++)
{
cudaStreamDestroy(pt_cuSten->streams[st]);
sprintf(msgStringBuffer, "Destroying stream %d on GPU %d", st, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Free the main memory
free(pt_cuSten->streams);
	// Destroy the events
for (int ev = 0; ev < 2; ev++)
{
cudaEventDestroy(pt_cuSten->events[ev]);
sprintf(msgStringBuffer, "Destroying event %d on GPU %d", ev, pt_cuSten->deviceNum);
checkError(msgStringBuffer);
}
// Free the main memory
free(pt_cuSten->events);
// Free the pointers for each input tile
free(pt_cuSten->dataInput);
// Free the pointers for each output tile
free(pt_cuSten->dataOutput);
// Free the top boundary tile pointers
free(pt_cuSten->boundaryTop);
// Free the bottom boundary tile pointers
free(pt_cuSten->boundaryBottom);
}
// ---------------------------------------------------------------------
// End of file
// --------------------------------------------------------------------- |
556945a8ef0e317660ae3d9e87fa5e198636f554.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// kernel that adds two vectors element by element
__global__
void tambahVector(
	const float *cVectorA,
	const float *cVectorB,
	float *cVectorC,
	const int cJumlahElemen)
{
	// compute this thread's global index
	int idx_ = blockIdx.x * blockDim.x + threadIdx.x;
	// guard out-of-range threads and perform the addition that main() verifies
	if (idx_ < cJumlahElemen)
	{
		cVectorC[idx_] = cVectorA[idx_] + cVectorB[idx_];
	}
}
// main function that launches the kernel
int main(void)
{
	// use the first GPU (device 0)
hipSetDevice(0);
const int kJumlahElemen = 25600;
size_t ukuran_vector_bytes_ = kJumlahElemen * sizeof(float);
	std::cout << "[Vector addition with " << kJumlahElemen << " elements]" << std::endl;
float *h_A_ = (float *)malloc(ukuran_vector_bytes_);
float *h_B_ = (float *)malloc(ukuran_vector_bytes_);
float *h_C_ = (float *)malloc(ukuran_vector_bytes_);
if (h_A_ == NULL || h_B_ == NULL || h_C_ == NULL)
{
std::cerr << "Failed to allocate host vectors!\n";
exit(-1);
}
for (int i = 0; i < kJumlahElemen; ++i)
{
h_A_[i] = rand()/(float)RAND_MAX;
h_B_[i] = rand()/(float)RAND_MAX;
}
float *d_A_ = NULL;
float *d_B_ = NULL;
float *d_C_ = NULL;
hipMalloc((void **)&d_A_, ukuran_vector_bytes_);
hipMalloc((void **)&d_B_, ukuran_vector_bytes_);
hipMalloc((void **)&d_C_, ukuran_vector_bytes_);
	std::cout << "Copy input from host to CUDA device\n";
hipMemcpy(d_A_, h_A_, ukuran_vector_bytes_, hipMemcpyHostToDevice);
hipMemcpy(d_B_, h_B_, ukuran_vector_bytes_, hipMemcpyHostToDevice);
int threads_per_block_ = 256;
int blocks_per_grid_ = 100;
dim3 block(threads_per_block_, 1, 1);
dim3 grid(blocks_per_grid_, 1, 1);
	std::cout << "Launching CUDA kernel with " << blocks_per_grid_ << " blocks of " << threads_per_block_ << " threads\n";
hipLaunchKernelGGL(( tambahVector), dim3(grid),dim3(block), 0, 0, d_A_,d_B_,d_C_,kJumlahElemen);
hipError_t err_ = hipGetLastError();
if (err_ != hipSuccess)
{
		std::cerr << "Failed to launch CUDA kernel (error code " << hipGetErrorString(err_) << ")!\n";
exit(-1);
}
	std::cout << "Copy data from CUDA device to host memory\n";
	hipMemcpy(h_C_, d_C_, ukuran_vector_bytes_, hipMemcpyDeviceToHost);
	// verify the results
for (int i = 0; i < kJumlahElemen; ++i)
{
if (fabs(h_A_[i] + h_B_[i] - h_C_[i]) > 1e-5)
{
			std::cerr << "Verification failed at element " << i << "!\n";
exit(-1);
}
}
std::cout << "Test PASSED\n";
hipFree(d_A_);
hipFree(d_B_);
hipFree(d_C_);
free(h_A_);
free(h_B_);
free(h_C_);
hipDeviceReset();
std::cout << "Done\n";
return 0;
}
| 556945a8ef0e317660ae3d9e87fa5e198636f554.cu | #include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// kernel that adds two vectors element by element
__global__
void tambahVector(
	const float *cVectorA,
	const float *cVectorB,
	float *cVectorC,
	const int cJumlahElemen)
{
	// compute this thread's global index
	int idx_ = blockIdx.x * blockDim.x + threadIdx.x;
	// guard out-of-range threads and perform the addition that main() verifies
	if (idx_ < cJumlahElemen)
	{
		cVectorC[idx_] = cVectorA[idx_] + cVectorB[idx_];
	}
}
// main function that launches the kernel
int main(void)
{
	// use the first GPU (device 0)
cudaSetDevice(0);
const int kJumlahElemen = 25600;
size_t ukuran_vector_bytes_ = kJumlahElemen * sizeof(float);
	std::cout << "[Vector addition with " << kJumlahElemen << " elements]" << std::endl;
float *h_A_ = (float *)malloc(ukuran_vector_bytes_);
float *h_B_ = (float *)malloc(ukuran_vector_bytes_);
float *h_C_ = (float *)malloc(ukuran_vector_bytes_);
if (h_A_ == NULL || h_B_ == NULL || h_C_ == NULL)
{
std::cerr << "Failed to allocate host vectors!\n";
exit(-1);
}
for (int i = 0; i < kJumlahElemen; ++i)
{
h_A_[i] = rand()/(float)RAND_MAX;
h_B_[i] = rand()/(float)RAND_MAX;
}
float *d_A_ = NULL;
float *d_B_ = NULL;
float *d_C_ = NULL;
cudaMalloc((void **)&d_A_, ukuran_vector_bytes_);
cudaMalloc((void **)&d_B_, ukuran_vector_bytes_);
cudaMalloc((void **)&d_C_, ukuran_vector_bytes_);
	std::cout << "Copy input from host to CUDA device\n";
cudaMemcpy(d_A_, h_A_, ukuran_vector_bytes_, cudaMemcpyHostToDevice);
cudaMemcpy(d_B_, h_B_, ukuran_vector_bytes_, cudaMemcpyHostToDevice);
int threads_per_block_ = 256;
int blocks_per_grid_ = 100;
dim3 block(threads_per_block_, 1, 1);
dim3 grid(blocks_per_grid_, 1, 1);
	std::cout << "Launching CUDA kernel with " << blocks_per_grid_ << " blocks of " << threads_per_block_ << " threads\n";
tambahVector<<<grid,block>>>(d_A_,d_B_,d_C_,kJumlahElemen);
cudaError_t err_ = cudaGetLastError();
if (err_ != cudaSuccess)
{
		std::cerr << "Failed to launch CUDA kernel (error code " << cudaGetErrorString(err_) << ")!\n";
exit(-1);
}
	std::cout << "Copy data from CUDA device to host memory\n";
	cudaMemcpy(h_C_, d_C_, ukuran_vector_bytes_, cudaMemcpyDeviceToHost);
	// verify the results
for (int i = 0; i < kJumlahElemen; ++i)
{
if (fabs(h_A_[i] + h_B_[i] - h_C_[i]) > 1e-5)
{
			std::cerr << "Verification failed at element " << i << "!\n";
exit(-1);
}
}
std::cout << "Test PASSED\n";
cudaFree(d_A_);
cudaFree(d_B_);
cudaFree(d_C_);
free(h_A_);
free(h_B_);
free(h_C_);
cudaDeviceReset();
std::cout << "Done\n";
return 0;
}
|
34a88467efc76278e7a58183290efab5c6b9971c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernGaussianBlur.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int width = XSIZE;
int height = YSIZE;
uint8_t *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
uint8_t *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernGaussianBlur), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,dst,src);
hipDeviceSynchronize();
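// The launches below are a warm-up pass (10 iterations) followed by the timed pass (1000 iterations);
// note that kernel launches are asynchronous, so the host-side steady_clock interval mainly reflects
// launch and queueing time unless a device synchronize is added before reading the end timestamp.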
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernGaussianBlur), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,dst,src);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernGaussianBlur), dim3(gridBlock),dim3(threadBlock), 0, 0, width,height,dst,src);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 34a88467efc76278e7a58183290efab5c6b9971c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernGaussianBlur.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int width = XSIZE;
int height = YSIZE;
uint8_t *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
uint8_t *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernGaussianBlur<<<gridBlock,threadBlock>>>(width,height,dst,src);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernGaussianBlur<<<gridBlock,threadBlock>>>(width,height,dst,src);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernGaussianBlur<<<gridBlock,threadBlock>>>(width,height,dst,src);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ea8b3194136e9d5a73850cc3407dd862a2a3b67c.hip | // !!! This is a file automatically generated by hipify!!!
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/memory/squeeze.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include "ppl/common/types.h"
#include <hip/hip_runtime.h>
ppl::common::RetCode PPLCUDASqueezeForwardImp(
hipStream_t stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output)
{
int64_t num_elems_output = output_shape->GetElementsIncludingPadding();
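    // Squeeze only removes size-1 dimensions, so the element count and the underlying memory layout are
    // unchanged; a device-to-device copy of the whole buffer is therefore sufficient here.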
hipMemcpyAsync(output, input, ppl::common::GetSizeOfDataType(input_shape->GetDataType()) * num_elems_output, hipMemcpyDeviceToDevice, stream);
return ppl::common::RC_SUCCESS;
} | ea8b3194136e9d5a73850cc3407dd862a2a3b67c.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/memory/squeeze.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include "ppl/common/types.h"
#include <cuda_runtime.h>
ppl::common::RetCode PPLCUDASqueezeForwardImp(
cudaStream_t stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* output_shape,
void* output)
{
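// Squeeze only removes size-1 dimensions, so the element count and memory
// layout are unchanged and the forward pass reduces to a device-to-device copy.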
int64_t num_elems_output = output_shape->GetElementsIncludingPadding();
cudaMemcpyAsync(output, input, ppl::common::GetSizeOfDataType(input_shape->GetDataType()) * num_elems_output, cudaMemcpyDeviceToDevice, stream);
return ppl::common::RC_SUCCESS;
} |
b1b4bca7604230fd3fd5d976736b1fb5bc8249e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main() {
int c;
int *dev_c;
hipMalloc((void **)&dev_c, sizeof(int));
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, 1, 1, dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_c);
printf("%d\n", c);
return 0;
}
| b1b4bca7604230fd3fd5d976736b1fb5bc8249e9.cu | #include <stdio.h>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main() {
int c;
int *dev_c;
cudaMalloc((void **)&dev_c, sizeof(int));
add<<<1, 1>>>(1, 1, dev_c);
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
printf("%d\n", c);
return 0;
}
|
97a7f6cd2dd874a4cfae0a5cd9dfd9ff37bedc4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2019-04-24
* I'll attend several conferences and workshops in the following weeks -
* busy days :(
*/
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "Sub.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
mask entries of a given tensor (CUDA Kernel)
c[i] = (mask[i] != 0) ? a[i] : alpha
>> a - A matrix
>> mask - mask matrix
>> c - where we put masked a
>> size - the size of a/b/c
>> alpha - value
*/
__global__
void KernelMASK(DTYPE * a, int * mask, DTYPE * c, int size, DTYPE alpha)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (mask[i] == 0) {
c[i] = alpha;
}
else {
c[i] = a[i];
}
}
}
/*
mask entries of a given tensor (cuda version)
>> a - a tensor
>> mask - mask tensor
>> c - where we put masked a
>> alpha - value
*/
void _CudaMask(const XTensor * a, const XTensor * mask, XTensor * c, DTYPE alpha)
{
CheckNTErrors(a && mask && c, "Empty tensor input!");
CheckNTErrors((a->unitNum == mask->unitNum && a->unitNum == c->unitNum),
"Unmatched tensors in addition!");
CheckNTErrors(mask->dataType == X_INT, "The mask tensor must be in X_INT!")
//CheckNTErrors((a->dataType == mask->dataType && a->dataType == c->dataType),
// "Unmatched tensors in addition!");
CheckNTErrors((a->devID == mask->devID && a->devID == c->devID),
"The tensors must be on the same device!");
int devIDBackup = XDevice::GetGPUDevice();
XDevice::SetGPUDevice(a->devID);
if (!a->isSparse && !mask->isSparse) {
CheckNTErrors(!c->isSparse, "Illegal use of sparse matrix in addition!");
if (a->dataType == DEFAULT_DTYPE &&
mask->dataType == X_INT &&
c->dataType == DEFAULT_DTYPE)
{
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
KernelMASK << <blocks, threads >> >((DTYPE*)a->data, (int *)mask->data, (DTYPE*)c->data, a->unitNum, alpha);
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
XDevice::SetGPUDevice(devIDBackup);
}
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor) | 97a7f6cd2dd874a4cfae0a5cd9dfd9ff37bedc4b.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2019-04-24
* I'll attend several conferences and workshops in the following weeks -
* busy days :(
*/
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "Sub.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
mask entries of a given tensor (CUDA Kernel)
c[i] = (mask[i] != 0) ? a[i] : alpha
>> a - A matrix
>> mask - mask matrix
>> c - where we put masked a
>> size - the size of a/b/c
>> alpha - value
*/
__global__
void KernelMASK(DTYPE * a, int * mask, DTYPE * c, int size, DTYPE alpha)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (mask[i] == 0) {
c[i] = alpha;
}
else {
c[i] = a[i];
}
}
}
/*
mask entries of a given tensor (cuda version)
>> a - a tensor
>> mask - mask tensor
>> c - where we put masked a
>> alpha - value
*/
void _CudaMask(const XTensor * a, const XTensor * mask, XTensor * c, DTYPE alpha)
{
CheckNTErrors(a && mask && c, "Empty tensor input!");
CheckNTErrors((a->unitNum == mask->unitNum && a->unitNum == c->unitNum),
"Unmatched tensors in addition!");
CheckNTErrors(mask->dataType == X_INT, "The mask tensor must be in X_INT!")
//CheckNTErrors((a->dataType == mask->dataType && a->dataType == c->dataType),
// "Unmatched tensors in addition!");
CheckNTErrors((a->devID == mask->devID && a->devID == c->devID),
"The tensors must be on the same device!");
int devIDBackup = XDevice::GetGPUDevice();
XDevice::SetGPUDevice(a->devID);
if (!a->isSparse && !mask->isSparse) {
CheckNTErrors(!c->isSparse, "Illegal use of sparse matrix in addition!");
if (a->dataType == DEFAULT_DTYPE &&
mask->dataType == X_INT &&
c->dataType == DEFAULT_DTYPE)
{
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
KernelMASK << <blocks, threads >> >((DTYPE*)a->data, (int *)mask->data, (DTYPE*)c->data, a->unitNum, alpha);
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
XDevice::SetGPUDevice(devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
eb6a396791a33c9ad63dcf64d5487cab35441d8b.hip | // !!! This is a file automatically generated by hipify!!!
/**********************************************************************
Copyright 2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
#include "urng.h"
#include "kernel.hip"
int main(int argc, char** argv)
{
if (argc != 5) {
printf("Usage: %s <path to file> <blockSizeX> <blockSizeY> <repeat>\n", argv[0]);
return 1;
}
const char* filePath = argv[1];
const int blockSizeX = atoi(argv[2]);
const int blockSizeY = atoi(argv[3]);
const int iterations = atoi(argv[4]);
// load input bitmap image
SDKBitMap inputBitmap;
inputBitmap.load(filePath);
if(!inputBitmap.isLoaded())
{
std::cout << "Failed to load input image!";
return -1;
}
// get width and height of input image
int height = inputBitmap.getHeight();
int width = inputBitmap.getWidth();
size_t imageSize = height * width * sizeof(uchar4);
std::cout << "Image " << filePath;
std::cout << " height: " << height;
std::cout << " width: " << width << std::endl;
// allocate memory for input & output image data
uchar4* inputImageData = (uchar4*)malloc(imageSize);
// allocate memory for output image data
uchar4* outputImageData = (uchar4*)malloc(imageSize);
// initialize the output image data to zero
memset(outputImageData, 0, imageSize);
// get the pointer to pixel data
uchar4 *pixelData = inputBitmap.getPixels();
if(pixelData == NULL)
{
std::cout << "Failed to read pixel Data!";
free(inputImageData);
free(outputImageData);
return -1;
}
// Copy pixel data into inputImageData
memcpy(inputImageData, pixelData, imageSize);
// allocate memory for verification output
uchar4 *verificationOutput = (uchar4*)malloc(imageSize);
// initialize the data to NULL
memset(verificationOutput, 0, imageSize);
const int factor = FACTOR;
uchar4 *inputImageBuffer;
hipMalloc((void**)&inputImageBuffer, imageSize);
uchar4 *outputImageBuffer;
hipMalloc((void**)&outputImageBuffer, imageSize);
dim3 grid (height * width / (blockSizeY * blockSizeX));
dim3 block (blockSizeY * blockSizeX); // maximum work-group size is 256
std::cout << "Executing kernel for " << iterations << " iterations" <<std::endl;
std::cout << "-------------------------------------------" << std::endl;
hipMemcpy(inputImageBuffer, inputImageData, imageSize, hipMemcpyHostToDevice);
// warmup
hipLaunchKernelGGL(( noise_uniform), dim3(grid), dim3(block), 0, 0, inputImageBuffer, outputImageBuffer, factor);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int i = 0; i < iterations; i++)
{
hipLaunchKernelGGL(( noise_uniform), dim3(grid), dim3(block), 0, 0, inputImageBuffer, outputImageBuffer, factor);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
std::cout << "Average kernel execution time: " << (time * 1e-3f) / iterations << " (us)\n";
hipMemcpy(outputImageData, outputImageBuffer, imageSize, hipMemcpyDeviceToHost);
hipFree(inputImageBuffer);
hipFree(outputImageBuffer);
// verify
float mean = 0;
for(int i = 0; i < (int)(width * height); i++)
{
mean += outputImageData[i].x - inputImageData[i].x;
mean += outputImageData[i].y - inputImageData[i].y;
mean += outputImageData[i].z - inputImageData[i].z;
mean += outputImageData[i].w - inputImageData[i].w;
}
mean /= (imageSize * factor);
std::cout << "The averaged mean of the image: " << mean << std::endl;
if(fabs(mean) < 1.0)
{
std::cout << "PASS\n" << std::endl;
}
else
{
std::cout << "FAIL\n" << std::endl;
}
#ifdef DUMP
// copy output image data back to original pixel data
memcpy(pixelData, outputImageData, imageSize);
// write the output bmp file
if(!inputBitmap.write(OUTPUT_IMAGE))
std::cout << "Failed to write output image!";
else
std::cout << "Write output image!";
#endif
// release program resources (input memory etc.)
free(inputImageData);
free(outputImageData);
free(verificationOutput);
return 0;
}
| eb6a396791a33c9ad63dcf64d5487cab35441d8b.cu | /**********************************************************************
Copyright ©2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
• Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
• Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
#include "urng.h"
#include "kernel.cu"
int main(int argc, char** argv)
{
if (argc != 5) {
printf("Usage: %s <path to file> <blockSizeX> <blockSizeY> <repeat>\n", argv[0]);
return 1;
}
const char* filePath = argv[1];
const int blockSizeX = atoi(argv[2]);
const int blockSizeY = atoi(argv[3]);
const int iterations = atoi(argv[4]);
// load input bitmap image
SDKBitMap inputBitmap;
inputBitmap.load(filePath);
if(!inputBitmap.isLoaded())
{
std::cout << "Failed to load input image!";
return -1;
}
// get width and height of input image
int height = inputBitmap.getHeight();
int width = inputBitmap.getWidth();
size_t imageSize = height * width * sizeof(uchar4);
std::cout << "Image " << filePath;
std::cout << " height: " << height;
std::cout << " width: " << width << std::endl;
// allocate memory for input & output image data
uchar4* inputImageData = (uchar4*)malloc(imageSize);
// allocate memory for output image data
uchar4* outputImageData = (uchar4*)malloc(imageSize);
// initialize the output image data to zero
memset(outputImageData, 0, imageSize);
// get the pointer to pixel data
uchar4 *pixelData = inputBitmap.getPixels();
if(pixelData == NULL)
{
std::cout << "Failed to read pixel Data!";
free(inputImageData);
free(outputImageData);
return -1;
}
// Copy pixel data into inputImageData
memcpy(inputImageData, pixelData, imageSize);
// allocate memory for verification output
uchar4 *verificationOutput = (uchar4*)malloc(imageSize);
// initialize the data to NULL
memset(verificationOutput, 0, imageSize);
const int factor = FACTOR;
uchar4 *inputImageBuffer;
cudaMalloc((void**)&inputImageBuffer, imageSize);
uchar4 *outputImageBuffer;
cudaMalloc((void**)&outputImageBuffer, imageSize);
dim3 grid (height * width / (blockSizeY * blockSizeX));
dim3 block (blockSizeY * blockSizeX); // maximum work-group size is 256
std::cout << "Executing kernel for " << iterations << " iterations" <<std::endl;
std::cout << "-------------------------------------------" << std::endl;
cudaMemcpy(inputImageBuffer, inputImageData, imageSize, cudaMemcpyHostToDevice);
// warmup
noise_uniform<<<grid, block>>>(inputImageBuffer, outputImageBuffer, factor);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int i = 0; i < iterations; i++)
{
noise_uniform<<<grid, block>>>(inputImageBuffer, outputImageBuffer, factor);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
std::cout << "Average kernel execution time: " << (time * 1e-3f) / iterations << " (us)\n";
cudaMemcpy(outputImageData, outputImageBuffer, imageSize, cudaMemcpyDeviceToHost);
cudaFree(inputImageBuffer);
cudaFree(outputImageBuffer);
// verify
float mean = 0;
for(int i = 0; i < (int)(width * height); i++)
{
mean += outputImageData[i].x - inputImageData[i].x;
mean += outputImageData[i].y - inputImageData[i].y;
mean += outputImageData[i].z - inputImageData[i].z;
mean += outputImageData[i].w - inputImageData[i].w;
}
mean /= (imageSize * factor);
std::cout << "The averaged mean of the image: " << mean << std::endl;
if(fabs(mean) < 1.0)
{
std::cout << "PASS\n" << std::endl;
}
else
{
std::cout << "FAIL\n" << std::endl;
}
#ifdef DUMP
// copy output image data back to original pixel data
memcpy(pixelData, outputImageData, imageSize);
// write the output bmp file
if(!inputBitmap.write(OUTPUT_IMAGE))
std::cout << "Failed to write output image!";
else
std::cout << "Write output image!";
#endif
// release program resources (input memory etc.)
free(inputImageData);
free(outputImageData);
free(verificationOutput);
return 0;
}
|
cubicFilter3D_kernel.hip | // !!! This is a file automatically generated by hipify!!!
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2009, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC3D_KERNEL_H_
#define _CUBIC3D_KERNEL_H_
#include "bspline_kernel.cu"
//! Trilinearly interpolated texture lookup, using unnormalized coordinates.
//! This function merely serves as a reference for the tricubic versions.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum hipTextureReadMode mode>
__device__ float interpolate_trilinear(texture<T, 3, mode> tex, float3 coord)
{
return tex3D(tex, coord.x, coord.y, coord.z);
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Straightforward implementation, using 64 nearest neighbour lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum hipTextureReadMode mode>
__device__ float interpolate_tricubic_simple(texture<T, 3, mode> tex, float3 coord)
{
// transform the coordinate from [0,extent] to [-0.5, extent-0.5]
const float3 coord_grid = coord - 0.5;
float3 index = floor(coord_grid);
const float3 fraction = coord_grid - index;
index = index + 0.5; //move from [-0.5, extent-0.5] to [0, extent]
float result = 0.0;
for (float z=-1; z < 2.5; z++) //range [-1, 2]
{
float bsplineZ = bspline(z-fraction.z);
float w = index.z + z;
for (float y=-1; y < 2.5; y++)
{
float bsplineYZ = bspline(y-fraction.y) * bsplineZ;
float v = index.y + y;
for (float x=-1; x < 2.5; x++)
{
float bsplineXYZ = bspline(x-fraction.x) * bsplineYZ;
float u = index.x + x;
result += bsplineXYZ * tex3D(tex, u, v, w);
}
}
}
return result;
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Fast implementation, using 8 trilinear lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum hipTextureReadMode mode>
__device__ float interpolate_tricubic_fast(texture<T, 3, mode> tex, float3 coord)
{
// shift the coordinate from [0,extent] to [-0.5, extent-0.5]
const float3 coord_grid = coord - 0.5;
const float3 index = floor(coord_grid);
const float3 fraction = coord_grid - index;
float3 w0, w1, w2, w3;
bspline_weights(fraction, w0, w1, w2, w3);
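// fold the four cubic weights per axis into two linear taps: g0/g1 are the
// combined weights and h0/h1 the shifted sample positions, so the hardware's
// trilinear filter performs the inner interpolation and only 2^3 = 8 texture
// fetches are needed instead of 64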
const float3 g0 = w0 + w1;
const float3 g1 = w2 + w3;
const float3 h0 = (w1 / g0) - 0.5 + index; //h0 = w1/g0 - 1, move from [-0.5, extent-0.5] to [0, extent]
const float3 h1 = (w3 / g1) + 1.5 + index; //h1 = w3/g1 + 1, move from [-0.5, extent-0.5] to [0, extent]
// fetch the eight linear interpolations
// weighting and fetching is interleaved for performance and stability reasons
float tex000 = tex3D(tex, h0.x, h0.y, h0.z);
float tex100 = tex3D(tex, h1.x, h0.y, h0.z);
tex000 = lerp(tex100, tex000, g0.x); //weigh along the x-direction
float tex010 = tex3D(tex, h0.x, h1.y, h0.z);
float tex110 = tex3D(tex, h1.x, h1.y, h0.z);
tex010 = lerp(tex110, tex010, g0.x); //weigh along the x-direction
tex000 = lerp(tex010, tex000, g0.y); //weigh along the y-direction
float tex001 = tex3D(tex, h0.x, h0.y, h1.z);
float tex101 = tex3D(tex, h1.x, h0.y, h1.z);
tex001 = lerp(tex101, tex001, g0.x); //weigh along the x-direction
float tex011 = tex3D(tex, h0.x, h1.y, h1.z);
float tex111 = tex3D(tex, h1.x, h1.y, h1.z);
tex011 = lerp(tex111, tex011, g0.x); //weigh along the x-direction
tex001 = lerp(tex011, tex001, g0.y); //weigh along the y-direction
return lerp(tex001, tex000, g0.z); //weigh along the z-direction
}
#endif // _CUBIC3D_KERNEL_H_
| cubicFilter3D_kernel.cu | /*--------------------------------------------------------------------------*\
Copyright (c) 2008-2009, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _CUBIC3D_KERNEL_H_
#define _CUBIC3D_KERNEL_H_
#include "bspline_kernel.cu"
//! Trilinearly interpolated texture lookup, using unnormalized coordinates.
//! This function merely serves as a reference for the tricubic versions.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum cudaTextureReadMode mode>
__device__ float interpolate_trilinear(texture<T, 3, mode> tex, float3 coord)
{
return tex3D(tex, coord.x, coord.y, coord.z);
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Straightforward implementation, using 64 nearest neighbour lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum cudaTextureReadMode mode>
__device__ float interpolate_tricubic_simple(texture<T, 3, mode> tex, float3 coord)
{
// transform the coordinate from [0,extent] to [-0.5, extent-0.5]
const float3 coord_grid = coord - 0.5;
float3 index = floor(coord_grid);
const float3 fraction = coord_grid - index;
index = index + 0.5; //move from [-0.5, extent-0.5] to [0, extent]
float result = 0.0;
for (float z=-1; z < 2.5; z++) //range [-1, 2]
{
float bsplineZ = bspline(z-fraction.z);
float w = index.z + z;
for (float y=-1; y < 2.5; y++)
{
float bsplineYZ = bspline(y-fraction.y) * bsplineZ;
float v = index.y + y;
for (float x=-1; x < 2.5; x++)
{
float bsplineXYZ = bspline(x-fraction.x) * bsplineYZ;
float u = index.x + x;
result += bsplineXYZ * tex3D(tex, u, v, w);
}
}
}
return result;
}
//! Tricubic interpolated texture lookup, using unnormalized coordinates.
//! Fast implementation, using 8 trilinear lookups.
//! @param tex 3D texture
//! @param coord unnormalized 3D texture coordinate
template<class T, enum cudaTextureReadMode mode>
__device__ float interpolate_tricubic_fast(texture<T, 3, mode> tex, float3 coord)
{
// shift the coordinate from [0,extent] to [-0.5, extent-0.5]
const float3 coord_grid = coord - 0.5;
const float3 index = floor(coord_grid);
const float3 fraction = coord_grid - index;
float3 w0, w1, w2, w3;
bspline_weights(fraction, w0, w1, w2, w3);
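// fold the four cubic weights per axis into two linear taps: g0/g1 are the
// combined weights and h0/h1 the shifted sample positions, so the hardware's
// trilinear filter performs the inner interpolation and only 2^3 = 8 texture
// fetches are needed instead of 64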
const float3 g0 = w0 + w1;
const float3 g1 = w2 + w3;
const float3 h0 = (w1 / g0) - 0.5 + index; //h0 = w1/g0 - 1, move from [-0.5, extent-0.5] to [0, extent]
const float3 h1 = (w3 / g1) + 1.5 + index; //h1 = w3/g1 + 1, move from [-0.5, extent-0.5] to [0, extent]
// fetch the eight linear interpolations
// weighting and fetching is interleaved for performance and stability reasons
float tex000 = tex3D(tex, h0.x, h0.y, h0.z);
float tex100 = tex3D(tex, h1.x, h0.y, h0.z);
tex000 = lerp(tex100, tex000, g0.x); //weigh along the x-direction
float tex010 = tex3D(tex, h0.x, h1.y, h0.z);
float tex110 = tex3D(tex, h1.x, h1.y, h0.z);
tex010 = lerp(tex110, tex010, g0.x); //weigh along the x-direction
tex000 = lerp(tex010, tex000, g0.y); //weigh along the y-direction
float tex001 = tex3D(tex, h0.x, h0.y, h1.z);
float tex101 = tex3D(tex, h1.x, h0.y, h1.z);
tex001 = lerp(tex101, tex001, g0.x); //weigh along the x-direction
float tex011 = tex3D(tex, h0.x, h1.y, h1.z);
float tex111 = tex3D(tex, h1.x, h1.y, h1.z);
tex011 = lerp(tex111, tex011, g0.x); //weigh along the x-direction
tex001 = lerp(tex011, tex001, g0.y); //weigh along the y-direction
return lerp(tex001, tex000, g0.z); //weigh along the z-direction
}
#endif // _CUBIC3D_KERNEL_H_
|
20d9c4ec9f583de8d1686f69a179d8d136cd3042.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "common.h"
#include "layers/conv.h"
#include "layers/dense.h"
#include "layers/pooling.h"
void test_dense(float *input, int input_channel, int output_channel) {
printf("****** Testing dense layer ******\n");
printf("*** Step 1: init dense layer ***\n");
Dense d(input_channel, output_channel);
d.dump();
printf("*** Step 2: forward ***\n");
float *doutput = d.forward(input);
float *output = (float*)malloc(sizeof(float) * output_channel);
hipMemcpy(output, doutput, sizeof(float) * output_channel, hipMemcpyDeviceToHost);
printf("Output: ");
for (int i=0; i<output_channel; i++) printf("%9.6f ", output[i]);
printf("\n\n");
for (int i=0; i<10; i++) output[i] = randn();
printf("Loss: ");
for (int i=0; i<10; i++) printf("%9.6f ", output[i]);
printf("\n\n");
float *loss;
hipMalloc(&loss, sizeof(float) * output_channel);
hipMemcpy(loss, output, sizeof(float) * 10, hipMemcpyHostToDevice);
printf("*** Step 3: backward ***\n");
float *ddelta = d.backward(loss, 0.1);
float *delta = (float*)malloc(sizeof(float) * input_channel);
hipMemcpy(delta, ddelta, sizeof(float) * input_channel, hipMemcpyDeviceToHost);
printf("Back-prop delta: ");
for (int i=0; i<input_channel; i++) printf("%9.6f ", delta[i]);
printf("\n\n");
d.dump();
free(output);
free(delta);
hipFree(loss);
}
void test_pooling(float *input, int c, int h, int w, int s) {
printf("***** Testing pooling layer ******\n");
printf("*** Step 1: init pooling layer ***\n");
Pooling p(c, h, w, s);
p.dump();
printf("\n");
printf("*** Step 2: forward ***\n");
float *doutput = p.forward(input);
float *output = (float*)malloc(sizeof(float) * c*h/s*w/s);
hipMemcpy(output, doutput, sizeof(float) * c*h/s*w/s, hipMemcpyDeviceToHost);
printf("Output:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h/s; j++) {
for (int k=0; k<w/s; k++) printf("%9.6f ", output[i*h/s*w/s + j*w/s + k]);
printf("\n");
}
printf("\n");
}
for (int i=0; i<c*h/s*w/s; i++) output[i] = randn();
printf("\nLoss:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h/s; j++) {
for (int k=0; k<w/s; k++) printf("%9.6f ", output[i*h*w/s/s + j*w/s + k]);
printf("\n");
}
printf("\n");
}
float *loss;
hipMalloc(&loss, sizeof(float) * c*h/s*w/s);
hipMemcpy(loss, output, sizeof(float) * c*h/s*w/s, hipMemcpyHostToDevice);
printf("*** Step 3: backward ***\n");
float *dd = p.backward(loss, 0.1);
float *d = (float*)malloc(sizeof(float)*c*h*w);
hipMemcpy(d, dd, sizeof(float)*c*h*w, hipMemcpyDeviceToHost);
printf("Back-prop delta:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h; j++) {
for (int k=0; k<w; k++) printf("%9.6f ", d[i*h*w + j*w + k]);
printf("\n");
}
printf("\n");
}
free(d);
free(output);
hipFree(loss);
}
void test_conv(float *input, int c, int h, int w, int oc, int k, int s, int p) {
printf("***** Testing conv layer ******\n");
printf("*** Step 1: init conv layer ***\n");
Conv conv(c, h, w, oc, k, s, p);
conv.dump();
printf("*** Step 2: forward ***\n");
int oh = (h+2*p-k)/s+1;
int ow = (w+2*p-k)/s+1;
float *doutput = conv.forward(input);
float *output = (float*)malloc(sizeof(float) * oc*oh*ow);
hipMemcpy(output, doutput, sizeof(float) * oc*oh*ow, hipMemcpyDeviceToHost);
printf("Output:\n");
for (int i=0; i<oc; i++) {
for (int j=0; j<oh; j++) {
for (int k=0; k<ow; k++) printf("%9.6f ", output[i*oh*ow + j*ow + k]);
printf("\n");
}
printf("\n");
}
for (int i=0; i<oc*oh*ow; i++) output[i] = randn();
printf("\nLoss:\n");
for (int i=0; i<oc; i++) {
for (int j=0; j<oh; j++) {
for (int k=0; k<ow; k++) printf("%9.6f ", output[i*oh*ow + j*ow + k]);
printf("\n");
}
printf("\n");
}
float *loss;
hipMalloc(&loss, sizeof(float) * oc*oh*ow);
hipMemcpy(loss, output, sizeof(float) * oc*oh*ow, hipMemcpyHostToDevice);
printf("*** Step 3: backward ***\n");
float *dd = conv.backward(loss, 0.1);
float *d = (float*)malloc(sizeof(float)*c*h*w);
hipMemcpy(d, dd, sizeof(float)*c*h*w, hipMemcpyDeviceToHost);
printf("Back-prop delta:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h; j++) {
for (int k=0; k<w; k++) printf("%9.6f ", d[i*h*w + j*w + k]);
printf("\n");
}
printf("\n");
}
conv.dump();
free(d);
free(output);
hipFree(loss);
}
int main() {
rand_init();
int c = 3, h = 10, w = 10;
float *input = (float*)malloc(sizeof(float) * c*h*w);
for (int i=0; i<c*h*w; i++) input[i] = randn();
float *dinput;
hipMalloc(&dinput, sizeof(float) * c*h*w);
hipMemcpy(dinput, input, sizeof(float) * c*h*w, hipMemcpyHostToDevice);
printf("Input:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h; j++) {
for (int k=0; k<w; k++) printf("%9.6f ", input[i*h*w + j*w + k]);
printf("\n");
}
printf("\n");
}
test_conv(dinput, c, h, w, 5, 3, 2, 2);
test_pooling(dinput, c, h, w, 2);
test_dense(dinput, c, 10);
free(input);
hipFree(dinput);
return 0;
}
| 20d9c4ec9f583de8d1686f69a179d8d136cd3042.cu | #include <stdio.h>
#include <stdlib.h>
#include "common.h"
#include "layers/conv.h"
#include "layers/dense.h"
#include "layers/pooling.h"
void test_dense(float *input, int input_channel, int output_channel) {
printf("****** Testing dense layer ******\n");
printf("*** Step 1: init dense layer ***\n");
Dense d(input_channel, output_channel);
d.dump();
printf("*** Step 2: forward ***\n");
float *doutput = d.forward(input);
float *output = (float*)malloc(sizeof(float) * output_channel);
cudaMemcpy(output, doutput, sizeof(float) * output_channel, cudaMemcpyDeviceToHost);
printf("Output: ");
for (int i=0; i<output_channel; i++) printf("%9.6f ", output[i]);
printf("\n\n");
for (int i=0; i<10; i++) output[i] = randn();
printf("Loss: ");
for (int i=0; i<10; i++) printf("%9.6f ", output[i]);
printf("\n\n");
float *loss;
cudaMalloc(&loss, sizeof(float) * output_channel);
cudaMemcpy(loss, output, sizeof(float) * 10, cudaMemcpyHostToDevice);
printf("*** Step 3: backward ***\n");
float *ddelta = d.backward(loss, 0.1);
float *delta = (float*)malloc(sizeof(float) * input_channel);
cudaMemcpy(delta, ddelta, sizeof(float) * input_channel, cudaMemcpyDeviceToHost);
printf("Back-prop delta: ");
for (int i=0; i<input_channel; i++) printf("%9.6f ", delta[i]);
printf("\n\n");
d.dump();
free(output);
free(delta);
cudaFree(loss);
}
void test_pooling(float *input, int c, int h, int w, int s) {
printf("***** Testing pooling layer ******\n");
printf("*** Step 1: init pooling layer ***\n");
Pooling p(c, h, w, s);
p.dump();
printf("\n");
printf("*** Step 2: forward ***\n");
float *doutput = p.forward(input);
float *output = (float*)malloc(sizeof(float) * c*h/s*w/s);
cudaMemcpy(output, doutput, sizeof(float) * c*h/s*w/s, cudaMemcpyDeviceToHost);
printf("Output:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h/s; j++) {
for (int k=0; k<w/s; k++) printf("%9.6f ", output[i*h/s*w/s + j*w/s + k]);
printf("\n");
}
printf("\n");
}
for (int i=0; i<c*h/s*w/s; i++) output[i] = randn();
printf("\nLoss:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h/s; j++) {
for (int k=0; k<w/s; k++) printf("%9.6f ", output[i*h*w/s/s + j*w/s + k]);
printf("\n");
}
printf("\n");
}
float *loss;
cudaMalloc(&loss, sizeof(float) * c*h/s*w/s);
cudaMemcpy(loss, output, sizeof(float) * c*h/s*w/s, cudaMemcpyHostToDevice);
printf("*** Step 3: backward ***\n");
float *dd = p.backward(loss, 0.1);
float *d = (float*)malloc(sizeof(float)*c*h*w);
cudaMemcpy(d, dd, sizeof(float)*c*h*w, cudaMemcpyDeviceToHost);
printf("Back-prop delta:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h; j++) {
for (int k=0; k<w; k++) printf("%9.6f ", d[i*h*w + j*w + k]);
printf("\n");
}
printf("\n");
}
free(d);
free(output);
cudaFree(loss);
}
void test_conv(float *input, int c, int h, int w, int oc, int k, int s, int p) {
printf("***** Testing conv layer ******\n");
printf("*** Step 1: init conv layer ***\n");
Conv conv(c, h, w, oc, k, s, p);
conv.dump();
printf("*** Step 2: forward ***\n");
int oh = (h+2*p-k)/s+1;
int ow = (w+2*p-k)/s+1;
float *doutput = conv.forward(input);
float *output = (float*)malloc(sizeof(float) * oc*oh*ow);
cudaMemcpy(output, doutput, sizeof(float) * oc*oh*ow, cudaMemcpyDeviceToHost);
printf("Output:\n");
for (int i=0; i<oc; i++) {
for (int j=0; j<oh; j++) {
for (int k=0; k<ow; k++) printf("%9.6f ", output[i*oh*ow + j*ow + k]);
printf("\n");
}
printf("\n");
}
for (int i=0; i<oc*oh*ow; i++) output[i] = randn();
printf("\nLoss:\n");
for (int i=0; i<oc; i++) {
for (int j=0; j<oh; j++) {
for (int k=0; k<ow; k++) printf("%9.6f ", output[i*oh*ow + j*ow + k]);
printf("\n");
}
printf("\n");
}
float *loss;
cudaMalloc(&loss, sizeof(float) * oc*oh*ow);
cudaMemcpy(loss, output, sizeof(float) * oc*oh*ow, cudaMemcpyHostToDevice);
printf("*** Step 3: backward ***\n");
float *dd = conv.backward(loss, 0.1);
float *d = (float*)malloc(sizeof(float)*c*h*w);
cudaMemcpy(d, dd, sizeof(float)*c*h*w, cudaMemcpyDeviceToHost);
printf("Back-prop delta:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h; j++) {
for (int k=0; k<w; k++) printf("%9.6f ", d[i*h*w + j*w + k]);
printf("\n");
}
printf("\n");
}
conv.dump();
free(d);
free(output);
cudaFree(loss);
}
int main() {
rand_init();
int c = 3, h = 10, w = 10;
float *input = (float*)malloc(sizeof(float) * c*h*w);
for (int i=0; i<c*h*w; i++) input[i] = randn();
float *dinput;
cudaMalloc(&dinput, sizeof(float) * c*h*w);
cudaMemcpy(dinput, input, sizeof(float) * c*h*w, cudaMemcpyHostToDevice);
printf("Input:\n");
for (int i=0; i<c; i++) {
for (int j=0; j<h; j++) {
for (int k=0; k<w; k++) printf("%9.6f ", input[i*h*w + j*w + k]);
printf("\n");
}
printf("\n");
}
test_conv(dinput, c, h, w, 5, 3, 2, 2);
test_pooling(dinput, c, h, w, 2);
test_dense(dinput, c, 10);
free(input);
cudaFree(dinput);
return 0;
}
|
9f60816543162f428e1524b6f1dc1f5e8d8e4ab3.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/image_dim_prefetching_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void ImageDimPrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch =
BasePrefetchingDataLayer<Dtype>::prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
if (output_data_dim_) {
// Reshape to loaded labels.
top[2]->ReshapeLike(batch->dim_);
// Copy the labels.
caffe_copy(batch->dim_.count(), batch->dim_.gpu_data(),
top[2]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in meanwhile.
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
BasePrefetchingDataLayer<Dtype>::prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(ImageDimPrefetchingDataLayer);
} // namespace caffe
| 9f60816543162f428e1524b6f1dc1f5e8d8e4ab3.cu | #include <vector>
#include "caffe/layers/image_dim_prefetching_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void ImageDimPrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch =
BasePrefetchingDataLayer<Dtype>::prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
if (output_data_dim_) {
// Reshape to loaded labels.
top[2]->ReshapeLike(batch->dim_);
// Copy the labels.
caffe_copy(batch->dim_.count(), batch->dim_.gpu_data(),
top[2]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in meanwhile.
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
BasePrefetchingDataLayer<Dtype>::prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(ImageDimPrefetchingDataLayer);
} // namespace caffe
|
529722fd700c3e03700090bda6de05af509e82d0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "stdio.h"
#define TILE_SIZE 64
#define WARP_SIZE 32
extern "C" void CSR_matvec(int N, int nnz, int* start, int* indices, float* data, float* x, float *y, bool bVectorized);
extern "C" void CSR_create(int N, int nnz, int* start, int * indices, float * data , float * x , float * y, int** start_d, int **indices_d, float **data_d, float **x_d, float **y_d);
extern "C" void CSR_kernel(int N, int nnz, int* start_d, int * indices_d, float * data_d , float * x_d , float * y_d, bool bVectorized);
extern "C" void CSR_destroy(int* start_d, int* indices_d, float* data_d, float* x_d, float* y_d);
extern "C" void ELL_create(int N, int num_cols_per_row, int * indices, float * data , float * x , float * y, int **indices_d, float **data_d, float **x_d, float **y_d);
extern "C" void ELL_kernel(int N, int num_cols_per_row , int * indices_d, float * data_d , float * x_d , float * y_d);
extern "C" void ELL_destroy(int* indices_d, float* data_d, float* x_d, float* y_d);
extern "C" void band_create(int N, int num_cols_per_row, float * data , float * x , float * y, float **data_d, float **x_d, float **y_d);
extern "C" void band_kernel(int N, int num_cols_per_row , float * data_d , float * x_d , float * y_d);
extern "C" void band_destroy(float* data_d, float* x_d, float* y_d);
/**
* Custom CUDA error check wrapper.
*/
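// The do { ... } while (0) wrapper lets the macro be used like a single
// statement, e.g. inside an unbraced if/else.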
#define checkCUDAError() do { \
hipError_t error = hipGetLastError(); \
if (error != hipSuccess) { \
printf("(CUDA) %s", hipGetErrorString(error)); \
printf(" (" __FILE__ ":%d)\n", __LINE__); \
}\
} while (0)
/**
* Cuda kernel for: CSR_s(A)x = y
*/
__global__ void k_csr_mat_vec_mm(const int N, int *start, int* indices, float *data, float *x, float* y) {
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if ( row < N ){
float dot = 0;
int row_start = start [ row ];
int row_end = start [ row+1];
for ( int jj = row_start ; jj < row_end ; jj ++) {
dot += data [ jj ] * x [ indices [ jj ]];
}
y[row] = dot ;
}
}
/**
* Cuda kernel for: CSR_v(A)x = y
*/
__global__ void k_csr2_mat_vec_mm(const int N, int *start, int* indices, float *data, float *x, float* y) {
__shared__ float vals[TILE_SIZE];
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
int warp_id = thread_id / WARP_SIZE;
int lane = thread_id & (WARP_SIZE - 1);
int row = warp_id;
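// vectorized CSR: one 32-thread warp cooperates on a single row, each lane
// accumulating a strided partial sum in shared memory (vals has one slot per
// thread, so TILE_SIZE must match the block size) before the warp reduction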
if (row < N) {
int row_start = start[row];
int row_end = start[row + 1];
// compute running sum per thread
vals[threadIdx.x] = 0;
for (int jj = row_start + lane; jj < row_end; jj += WARP_SIZE) {
vals[threadIdx.x] += data[jj] * x[indices[jj]];
}
// parallel reduction in shared memory
for (int d = WARP_SIZE >> 1; d >= 1; d >>= 1) {
if (lane < d) vals[threadIdx.x] += vals[threadIdx.x + d];
}
// first thread in a warp writes the result
if (lane == 0) {
y[row] = vals[threadIdx.x];
}
}
}
/**
* Cuda kernel for: ELL(A)x = y
*/
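// Note: indices/data use the column-major ELL layout (entry n of a row lives
// at offset N * n + row), so consecutive rows read consecutive addresses and
// the loads coalesce; padding slots are assumed to store a 0 value, which the
// val != 0 check skips.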
__global__ void k_ell_mat_vec_mm ( const int N, const int num_cols_per_row , int * indices,
float * data , float * x , float * y ) {
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if ( row < N ){
float dot = 0;
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = indices [ N * n + row ];
float val = data [ N * n + row ];
if ( val != 0)
dot += val * x [ col ];
}
y [ row ] = dot ;
}
}
/**
* Cuda kernel for: Band(A)x = y
*/
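// Assumed layout, mirroring the ELL kernel above: diagonal k of the band is
// stored at data[N * k + i] for row i and maps to column j = i + k - k_max;
// out-of-band slots are padded with 0 so the val != 0 check skips them.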
__global__ void band_matvec(int N, int k_max,
float* a, float* x, float* y) {
int i = TILE_SIZE * blockIdx.x + threadIdx.x;
if (i < N) {
float dot = 0;
for (int k = 0; k < 2 * k_max + 1; k++) {
float val = a[N * k + i];
int j = i + k - k_max;
if (val != 0) dot += val * x[j];
}
y[i] = dot;
}
}
/**
* Perform: CSR(A)x = y
*/
void CSR_matvec(const int N, const int nnz, int* start, int * indices, float * data , float * x , float * y, const bool bVectorized) {
int *start_d, *indices_d;
float *data_d, *x_d, *y_d;
CSR_create(N, nnz, start, indices, data, x, y, &start_d, &indices_d, &data_d, &x_d, &y_d);
CSR_kernel(N, nnz, start_d, indices_d, data_d, x_d, y_d, bVectorized);
hipMemcpy(y, y_d, N * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError();
CSR_destroy(start_d, indices_d, data_d, x_d, y_d);
}
/**
* Create CSR matrix
*/
void CSR_create(const int N, const int nnz,
int * start, int * indices, float * data , float * x , float * y,
int ** start_d, int ** indices_d, float **data_d, float **x_d, float **y_d) {
/************************/
/* copy to device */
/************************/
hipMalloc((void **) start_d, (N+1) * sizeof(int));
checkCUDAError();
hipMemcpy(*start_d, start, (N+1) * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) indices_d, nnz * sizeof(int));
checkCUDAError();
hipMemcpy(*indices_d, indices, nnz * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) data_d, nnz * sizeof(float));
checkCUDAError();
hipMemcpy(*data_d, data, nnz * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) x_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(*x_d, x, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) y_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(*y_d, y, N * sizeof(float) , hipMemcpyHostToDevice);
checkCUDAError();
}
/**
* Perform: CSR(A)x = y
*/
void CSR_kernel(const int N, const int nnz, int * start_d , int * indices_d, float * data_d , float * x_d , float * y_d, const bool bVectorized) {
if (bVectorized) {
//#threads = #rows * #threads per row (= N * WARP_SIZE)
dim3 grid((N * WARP_SIZE + TILE_SIZE - 1)/TILE_SIZE, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
hipLaunchKernelGGL(( k_csr2_mat_vec_mm) , dim3(grid), dim3(block) , 0, 0, N, start_d, indices_d, data_d, x_d, y_d);
} else {
//#threads = #rows (= N)
dim3 grid((N + TILE_SIZE - 1)/TILE_SIZE, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
hipLaunchKernelGGL(( k_csr_mat_vec_mm) , dim3(grid), dim3(block) , 0, 0, N, start_d, indices_d, data_d, x_d, y_d);
}
checkCUDAError();
}
/**
* Destroy CSR matrix
*/
void CSR_destroy(int* start_d, int* indices_d, float* data_d, float* x_d, float* y_d) {
hipFree(start_d);
hipFree(indices_d);
hipFree(data_d);
hipFree(x_d);
hipFree(y_d);
}
/**
* Create band matrix
*/
void band_create(const int N, const int num_cols_per_row,
float * data , float * x , float * y,
float **data_d, float **x_d, float **y_d) {
hipMalloc((void **) data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
hipMemcpy(*data_d, data, N * num_cols_per_row * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) x_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(*x_d, x, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) y_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(*y_d, y, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
}
/**
* Perform: band(A)x = y
*/
void band_kernel(int N, int k_max , float * data_d , float * x_d , float * y_d) {
//#threads = #rows (= N)
dim3 grid((N + TILE_SIZE - 1)/TILE_SIZE, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
hipLaunchKernelGGL(( band_matvec) , dim3(grid), dim3(block) , 0, 0, N, k_max, data_d , x_d, y_d);
checkCUDAError();
}
/**
* Destroy ELL matrix
*/
void band_destroy(float* data_d, float* x_d, float* y_d) {
hipFree(data_d);
hipFree(x_d);
hipFree(y_d);
}
/**
* Create ELL matrix
*/
void ELL_create(const int N, const int num_cols_per_row,
int * indices, float * data , float * x , float * y,
int ** indices_d, float **data_d, float **x_d, float **y_d) {
hipMalloc((void **) indices_d, N * num_cols_per_row * sizeof(int));
checkCUDAError();
hipMemcpy(*indices_d, indices, N * num_cols_per_row * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
hipMemcpy(*data_d, data, N * num_cols_per_row * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) x_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(*x_d, x, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) y_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(*y_d, y, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
}
/**
* Perform: ELL(A)x = y
*/
void ELL_kernel(int N, int num_cols_per_row , int * indices_d, float * data_d , float * x_d , float * y_d) {
//round grid size N/TILE_SIZE up
dim3 grid((N + TILE_SIZE - 1)/TILE_SIZE, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
hipLaunchKernelGGL(( k_ell_mat_vec_mm) , dim3(grid), dim3(block) , 0, 0, N, num_cols_per_row, indices_d, data_d , x_d, y_d);
checkCUDAError();
}
/**
* Destroy ELL matrix
*/
void ELL_destroy(int* indices_d, float* data_d, float* x_d, float* y_d) {
hipFree(indices_d);
hipFree(data_d);
hipFree(x_d);
hipFree(y_d);
}
| 529722fd700c3e03700090bda6de05af509e82d0.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "stdio.h"
#define TILE_SIZE 64
#define WARP_SIZE 32
extern "C" void CSR_matvec(int N, int nnz, int* start, int* indices, float* data, float* x, float *y, bool bVectorized);
extern "C" void CSR_create(int N, int nnz, int* start, int * indices, float * data , float * x , float * y, int** start_d, int **indices_d, float **data_d, float **x_d, float **y_d);
extern "C" void CSR_kernel(int N, int nnz, int* start_d, int * indices_d, float * data_d , float * x_d , float * y_d, bool bVectorized);
extern "C" void CSR_destroy(int* start_d, int* indices_d, float* data_d, float* x_d, float* y_d);
extern "C" void ELL_create(int N, int num_cols_per_row, int * indices, float * data , float * x , float * y, int **indices_d, float **data_d, float **x_d, float **y_d);
extern "C" void ELL_kernel(int N, int num_cols_per_row , int * indices_d, float * data_d , float * x_d , float * y_d);
extern "C" void ELL_destroy(int* indices_d, float* data_d, float* x_d, float* y_d);
extern "C" void band_create(int N, int num_cols_per_row, float * data , float * x , float * y, float **data_d, float **x_d, float **y_d);
extern "C" void band_kernel(int N, int num_cols_per_row , float * data_d , float * x_d , float * y_d);
extern "C" void band_destroy(float* data_d, float* x_d, float* y_d);
/**
* Custom CUDA error check wrapper.
*/
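// The do { ... } while (0) wrapper lets the macro be used like a single
// statement, e.g. inside an unbraced if/else.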
#define checkCUDAError() do { \
cudaError_t error = cudaGetLastError(); \
if (error != cudaSuccess) { \
printf("(CUDA) %s", cudaGetErrorString(error)); \
printf(" (" __FILE__ ":%d)\n", __LINE__); \
}\
} while (0)
/**
* Cuda kernel for: CSR_s(A)x = y
*/
__global__ void k_csr_mat_vec_mm(const int N, int *start, int* indices, float *data, float *x, float* y) {
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if ( row < N ){
float dot = 0;
int row_start = start [ row ];
int row_end = start [ row+1];
for ( int jj = row_start ; jj < row_end ; jj ++) {
dot += data [ jj ] * x [ indices [ jj ]];
}
y[row] = dot ;
}
}
/**
* Cuda kernel for: CSR_v(A)x = y
*/
__global__ void k_csr2_mat_vec_mm(const int N, int *start, int* indices, float *data, float *x, float* y) {
__shared__ float vals[TILE_SIZE];
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
int warp_id = thread_id / WARP_SIZE;
int lane = thread_id & (WARP_SIZE - 1);
int row = warp_id;
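// vectorized CSR: one 32-thread warp cooperates on a single row, each lane
// accumulating a strided partial sum in shared memory (vals has one slot per
// thread, so TILE_SIZE must match the block size) before the warp reduction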
if (row < N) {
int row_start = start[row];
int row_end = start[row + 1];
// compute running sum per thread
vals[threadIdx.x] = 0;
for (int jj = row_start + lane; jj < row_end; jj += WARP_SIZE) {
vals[threadIdx.x] += data[jj] * x[indices[jj]];
}
// parallel reduction in shared memory
for (int d = WARP_SIZE >> 1; d >= 1; d >>= 1) {
if (lane < d) vals[threadIdx.x] += vals[threadIdx.x + d];
}
// first thread in a warp writes the result
if (lane == 0) {
y[row] = vals[threadIdx.x];
}
}
}
/**
* Cuda kernel for: ELL(A)x = y
*/
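// Note: indices/data use the column-major ELL layout (entry n of a row lives
// at offset N * n + row), so consecutive rows read consecutive addresses and
// the loads coalesce; padding slots are assumed to store a 0 value, which the
// val != 0 check skips.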
__global__ void k_ell_mat_vec_mm ( const int N, const int num_cols_per_row , int * indices,
float * data , float * x , float * y ) {
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if ( row < N ){
float dot = 0;
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = indices [ N * n + row ];
float val = data [ N * n + row ];
if ( val != 0)
dot += val * x [ col ];
}
y [ row ] = dot ;
}
}
/**
* Cuda kernel for: Band(A)x = y
*/
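// Assumed layout, mirroring the ELL kernel above: diagonal k of the band is
// stored at data[N * k + i] for row i and maps to column j = i + k - k_max;
// out-of-band slots are padded with 0 so the val != 0 check skips them.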
__global__ void band_matvec(int N, int k_max,
float* a, float* x, float* y) {
int i = TILE_SIZE * blockIdx.x + threadIdx.x;
if (i < N) {
float dot = 0;
for (int k = 0; k < 2 * k_max + 1; k++) {
float val = a[N * k + i];
int j = i + k - k_max;
if (val != 0) dot += val * x[j];
}
y[i] = dot;
}
}
/**
* Perform: CSR(A)x = y
*/
void CSR_matvec(const int N, const int nnz, int* start, int * indices, float * data , float * x , float * y, const bool bVectorized) {
int *start_d, *indices_d;
float *data_d, *x_d, *y_d;
CSR_create(N, nnz, start, indices, data, x, y, &start_d, &indices_d, &data_d, &x_d, &y_d);
CSR_kernel(N, nnz, start_d, indices_d, data_d, x_d, y_d, bVectorized);
cudaMemcpy(y, y_d, N * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError();
CSR_destroy(start_d, indices_d, data_d, x_d, y_d);
}
/**
* Create CSR matrix
*/
void CSR_create(const int N, const int nnz,
int * start, int * indices, float * data , float * x , float * y,
int ** start_d, int ** indices_d, float **data_d, float **x_d, float **y_d) {
/************************/
/* copy to device */
/************************/
cudaMalloc((void **) start_d, (N+1) * sizeof(int));
checkCUDAError();
cudaMemcpy(*start_d, start, (N+1) * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) indices_d, nnz * sizeof(int));
checkCUDAError();
cudaMemcpy(*indices_d, indices, nnz * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) data_d, nnz * sizeof(float));
checkCUDAError();
cudaMemcpy(*data_d, data, nnz * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) x_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(*x_d, x, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) y_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(*y_d, y, N * sizeof(float) , cudaMemcpyHostToDevice);
checkCUDAError();
}
/**
* Perform: CSR(A)x = y
*/
void CSR_kernel(const int N, const int nnz, int * start_d , int * indices_d, float * data_d , float * x_d , float * y_d, const bool bVectorized) {
if (bVectorized) {
//#threads = #rows * #threads per row (= N * WARP_SIZE)
dim3 grid((N * WARP_SIZE + TILE_SIZE - 1)/TILE_SIZE, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
k_csr2_mat_vec_mm <<< grid, block >>> (N, start_d, indices_d, data_d, x_d, y_d);
} else {
//#threads = #rows (= N)
dim3 grid((N + TILE_SIZE - 1)/TILE_SIZE, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
k_csr_mat_vec_mm <<< grid, block >>> (N, start_d, indices_d, data_d, x_d, y_d);
}
checkCUDAError();
}
/**
* Destroy CSR matrix
*/
void CSR_destroy(int* start_d, int* indices_d, float* data_d, float* x_d, float* y_d) {
cudaFree(start_d);
cudaFree(indices_d);
cudaFree(data_d);
cudaFree(x_d);
cudaFree(y_d);
}
/**
* Create band matrix
*/
void band_create(const int N, const int num_cols_per_row,
float * data , float * x , float * y,
float **data_d, float **x_d, float **y_d) {
cudaMalloc((void **) data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
cudaMemcpy(*data_d, data, N * num_cols_per_row * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) x_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(*x_d, x, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) y_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(*y_d, y, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
}
/**
* Perform: band(A)x = y
*/
void band_kernel(int N, int k_max , float * data_d , float * x_d , float * y_d) {
//#threads = #rows (= N)
dim3 grid((N + TILE_SIZE - 1)/TILE_SIZE, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
band_matvec <<< grid, block >>> (N, k_max, data_d , x_d, y_d);
checkCUDAError();
}
/**
* Destroy ELL matrix
*/
void band_destroy(float* data_d, float* x_d, float* y_d) {
cudaFree(data_d);
cudaFree(x_d);
cudaFree(y_d);
}
/**
* Create ELL matrix
*/
void ELL_create(const int N, const int num_cols_per_row,
int * indices, float * data , float * x , float * y,
int ** indices_d, float **data_d, float **x_d, float **y_d) {
cudaMalloc((void **) indices_d, N * num_cols_per_row * sizeof(int));
checkCUDAError();
cudaMemcpy(*indices_d, indices, N * num_cols_per_row * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
cudaMemcpy(*data_d, data, N * num_cols_per_row * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) x_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(*x_d, x, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) y_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(*y_d, y, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
}
/**
* Perform: ELL(A)x = y
*/
void ELL_kernel(int N, int num_cols_per_row , int * indices_d, float * data_d , float * x_d , float * y_d) {
//round grid size N/TILE_SIZE up
dim3 grid((N + TILE_SIZE - 1)/TILE_SIZE, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
k_ell_mat_vec_mm <<< grid, block >>> (N, num_cols_per_row, indices_d, data_d , x_d, y_d);
checkCUDAError();
}
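/*
 * Illustrative sketch only: the real k_ell_mat_vec_mm launched above is
 * defined elsewhere in this file. The hypothetical kernel below shows the
 * usual one-thread-per-row ELL pattern; the row-major layout assumed here
 * (entry j of row i at i*num_cols_per_row + j) is a guess -- the actual
 * kernel may store the padded ELL arrays column-major for coalescing.
 */
__global__ void k_ell_mat_vec_mm_sketch(int N, int num_cols_per_row, const int *indices,
                                        const float *data, const float *x, float *y) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < N) {
        float dot = 0.0f;
        for (int j = 0; j < num_cols_per_row; ++j) {
            int idx = row * num_cols_per_row + j;   // assumed row-major padding
            dot += data[idx] * x[indices[idx]];     // padded entries carry data == 0
        }
        y[row] = dot;
    }
}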
/**
* Destroy ELL matrix
*/
void ELL_destroy(int* indices_d, float* data_d, float* x_d, float* y_d) {
cudaFree(indices_d);
cudaFree(data_d);
cudaFree(x_d);
cudaFree(y_d);
}
|
8e0e887fa398599a76b4955b9214a5281daea86b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2016 Gernot Riegler
// Institute for Computer Graphics and Vision (ICG)
// Graz University of Technology (TU GRAZ)
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. All advertising materials mentioning features or use of this software
// must display the following acknowledgement:
// This product includes software developed by the ICG, TU GRAZ.
// 4. Neither the name of the ICG, TU GRAZ nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "utils.h"
#include "common.h"
__host__ __device__ float icgCudaRenderInterLinePlane(const float* r0,
const float* rd, const float* p, float bg_val) {
float x0 = r0[0];
float y0 = r0[1];
float z0 = r0[2];
float xd = rd[0];
float yd = rd[1];
float zd = rd[2];
float x1 = p[0];
float y1 = p[1];
float z1 = p[2];
float x2 = p[3];
float y2 = p[4];
float z2 = p[5];
float x3 = p[6];
float y3 = p[7];
float z3 = p[8];
float x4 = p[9];
float y4 = p[10];
float z4 = p[11];
float A = y1 * (z2 - z3) + y2 * (z3 - z1) + y3 * (z1 - z2);
float B = z1 * (x2 - x3) + z2 * (x3 - x1) + z3 * (x1 - x2);
float C = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2);
float D = -x1 * (y2*z3 - y3*z2) - x2 * (y3*z1 - y1*z3) - x3 * (y1*z2 - y2*z1);
float t = -(A * x0 + B * y0 + C * z0 + D) / (A * xd + B * yd + C * zd);
float xi = x0 + t * xd;
float yi = y0 + t * yd;
float zi = z0 + t * zd;
float v1 = x1 * (x2 - x1) + y1 * (y2 - y1) + z1 * (z2 - z1);
float v2 = xi * (x2 - x1) + yi * (y2 - y1) + zi * (z2 - z1);
float v3 = x2 * (x2 - x1) + y2 * (y2 - y1) + z2 * (z2 - z1);
float v4 = x1 * (x4 - x1) + y1 * (y4 - y1) + z1 * (z4 - z1);
float v5 = xi * (x4 - x1) + yi * (y4 - y1) + zi * (z4 - z1);
float v6 = x4 * (x4 - x1) + y4 * (y4 - y1) + z4 * (z4 - z1);
bool within = (v1 <= v2 && v2 <= v3) && (v4 <= v5 && v5 <= v6);
/* return within && t >= 0 ? t : bg_val; */
return within ? (t < 0 ? 0 : t) : bg_val;
}
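/*
 * Note on the plane test above: (A, B, C) is the normal of the plane through
 * the first three corner points and D completes the plane equation
 * A*x + B*y + C*z + D = 0, so t = -(A*x0 + B*y0 + C*z0 + D) / (A*xd + B*yd + C*zd)
 * is the ray parameter at the intersection point (xi, yi, zi). The v1..v6
 * comparisons project that point onto the edges p1->p2 and p1->p4 and accept
 * it only if it lies inside the (assumed rectangular) patch spanned by the
 * four corners; a negative t is clamped to 0 rather than rejected.
 */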
__host__ __device__ float icgCudaRenderInterLineSphere(const float* r0,
const float* rd, const float* s, float bg_val) {
float x0 = r0[0];
float y0 = r0[1];
float z0 = r0[2];
float rd_norm = sqrt(rd[0] * rd[0] + rd[1] * rd[1] + rd[2] * rd[2]);
float xd = rd[0] / rd_norm;
float yd = rd[1] / rd_norm;
float zd = rd[2] / rd_norm;
float xc = s[0];
float yc = s[1];
float zc = s[2];
float r = s[3];
float rd_dot_r0_m_c = xd * (x0 - xc) + yd * (y0 - yc) + zd * (z0 - zc);
float r0_m_c_dot = (x0 - xc) * (x0 - xc) + (y0 - yc) * (y0 - yc) + (z0 - zc) * (z0 - zc);
float radicand = rd_dot_r0_m_c * rd_dot_r0_m_c - r0_m_c_dot + r * r;
float t1 = -rd_dot_r0_m_c + sqrt(radicand);
float t2 = -rd_dot_r0_m_c - sqrt(radicand);
return radicand < 0 ? bg_val : (t1 < t2 ? t1 : t2);
}
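/*
 * Note on the sphere test above: with the direction normalized, substituting
 * r(t) = r0 + t*rd into |r - c|^2 = r^2 gives
 *   t^2 + 2*(rd . (r0 - c))*t + |r0 - c|^2 - r^2 = 0,
 * so t = -(rd . (r0 - c)) +- sqrt(radicand) with
 *   radicand = (rd . (r0 - c))^2 - |r0 - c|^2 + r^2.
 * A negative radicand means the ray misses the sphere and bg_val is returned;
 * otherwise the smaller root (the nearer intersection) is used. A negative t
 * (sphere behind the ray origin) is not rejected here.
 */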
__global__ void icgCudaRenderRender(float* img, int length, int img_height, int img_width,
float cam_x, float cam_y, float cam_z, float step_x, float step_y,
int n_planes, const float* planes, int n_spheres, const float* spheres,
float bg_val) {
CUDA_KERNEL_LOOP(img_idx, length) {
int h = img_height - img_idx / img_width - 1;
int w = img_idx % img_width;
float r0[3];
r0[0] = cam_x + w * step_x;
r0[1] = cam_y + h * step_y;
r0[2] = cam_z;
float rd[3];
rd[0] = 0;
rd[1] = 0;
rd[2] = 1;
float val = bg_val;
for(int pl_idx = 0; pl_idx < n_planes; ++pl_idx) {
float pl_val = icgCudaRenderInterLinePlane(r0, rd, planes + 3 * 4 * pl_idx, bg_val);
val = pl_val < val ? pl_val : val;
}
for(int sp_idx = 0; sp_idx < n_spheres; ++sp_idx) {
float sp_val = icgCudaRenderInterLineSphere(r0, rd, spheres + 4 * sp_idx, bg_val);
val = sp_val < val ? sp_val : val;
}
img[img_idx] = val < 0 ? 0 : val;
}
}
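/*
 * The kernel above is a simple orthographic ray cast: each pixel gets a ray
 * starting at (cam_x + w*step_x, cam_y + h*step_y, cam_z) and pointing along
 * +z (rows are traversed bottom-up via h = img_height - row - 1). The
 * smallest intersection distance over all planes and spheres, defaulting to
 * bg_val and clamped at 0, is written out as the per-pixel depth value.
 */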
static int icgrender_IcgCudaRender_render(lua_State *L)
{
THCState* state = getCutorchState(L);
THCudaTensor* planes = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* spheres = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
double cam_x = luaT_getfieldchecknumber(L, 1, "cam_x");
double cam_y = luaT_getfieldchecknumber(L, 1, "cam_y");
double cam_z = luaT_getfieldchecknumber(L, 1, "cam_z");
long img_height = luaT_getfieldchecknumber(L, 1, "img_height");
long img_width = luaT_getfieldchecknumber(L, 1, "img_width");
double step_x = luaT_getfieldchecknumber(L, 1, "step_x");
double step_y = luaT_getfieldchecknumber(L, 1, "step_y");
double bg_val = luaT_getfieldchecknumber(L, 1, "bg_val");
THCudaTensor* img = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "img", "torch.CudaTensor");
THCudaTensor_resize2d(state, img, img_height, img_width);
long n_planes = 0;
if(THCudaTensor_nDimension(state, planes) > 0) {
n_planes = THCudaTensor_size(state, planes, 0);
}
long n_spheres = 0;
if(THCudaTensor_nDimension(state, spheres) > 0) {
n_spheres = THCudaTensor_size(state, spheres, 0);
}
long length = img_height * img_width;
hipLaunchKernelGGL(( icgCudaRenderRender), dim3(GET_BLOCKS(length)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, img), length, img_height, img_width,
cam_x, cam_y, cam_z, step_x, step_y,
n_planes, THCudaTensor_data(state, planes),
n_spheres, THCudaTensor_data(state, spheres),
bg_val);
THCudaCheck(hipGetLastError());
return 1;
}
static const struct luaL_Reg icgrender_IcgCudaRender__ [] = {
{"IcgCudaRender_render", icgrender_IcgCudaRender_render},
{NULL, NULL}
};
void icgrender_IcgCudaRender_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
#if LUA_VERSION_NUM == 501
luaL_register(L, NULL, icgrender_IcgCudaRender__);
#else
luaL_setfuncs(L, icgrender_IcgCudaRender__, 0);
#endif
// luaT_registeratname(L, icgrender_IcgRender__, "icgrender");
// lua_pop(L, 1);
}
| 8e0e887fa398599a76b4955b9214a5281daea86b.cu | // Copyright (C) 2016 Gernot Riegler
// Institute for Computer Graphics and Vision (ICG)
// Graz University of Technology (TU GRAZ)
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. All advertising materials mentioning features or use of this software
// must display the following acknowledgement:
// This product includes software developed by the ICG, TU GRAZ.
// 4. Neither the name of the ICG, TU GRAZ nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "utils.h"
#include "common.h"
__host__ __device__ float icgCudaRenderInterLinePlane(const float* r0,
const float* rd, const float* p, float bg_val) {
float x0 = r0[0];
float y0 = r0[1];
float z0 = r0[2];
float xd = rd[0];
float yd = rd[1];
float zd = rd[2];
float x1 = p[0];
float y1 = p[1];
float z1 = p[2];
float x2 = p[3];
float y2 = p[4];
float z2 = p[5];
float x3 = p[6];
float y3 = p[7];
float z3 = p[8];
float x4 = p[9];
float y4 = p[10];
float z4 = p[11];
float A = y1 * (z2 - z3) + y2 * (z3 - z1) + y3 * (z1 - z2);
float B = z1 * (x2 - x3) + z2 * (x3 - x1) + z3 * (x1 - x2);
float C = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2);
float D = -x1 * (y2*z3 - y3*z2) - x2 * (y3*z1 - y1*z3) - x3 * (y1*z2 - y2*z1);
float t = -(A * x0 + B * y0 + C * z0 + D) / (A * xd + B * yd + C * zd);
float xi = x0 + t * xd;
float yi = y0 + t * yd;
float zi = z0 + t * zd;
float v1 = x1 * (x2 - x1) + y1 * (y2 - y1) + z1 * (z2 - z1);
float v2 = xi * (x2 - x1) + yi * (y2 - y1) + zi * (z2 - z1);
float v3 = x2 * (x2 - x1) + y2 * (y2 - y1) + z2 * (z2 - z1);
float v4 = x1 * (x4 - x1) + y1 * (y4 - y1) + z1 * (z4 - z1);
float v5 = xi * (x4 - x1) + yi * (y4 - y1) + zi * (z4 - z1);
float v6 = x4 * (x4 - x1) + y4 * (y4 - y1) + z4 * (z4 - z1);
bool within = (v1 <= v2 && v2 <= v3) && (v4 <= v5 && v5 <= v6);
/* return within && t >= 0 ? t : bg_val; */
return within ? (t < 0 ? 0 : t) : bg_val;
}
__host__ __device__ float icgCudaRenderInterLineSphere(const float* r0,
const float* rd, const float* s, float bg_val) {
float x0 = r0[0];
float y0 = r0[1];
float z0 = r0[2];
float rd_norm = sqrt(rd[0] * rd[0] + rd[1] * rd[1] + rd[2] * rd[2]);
float xd = rd[0] / rd_norm;
float yd = rd[1] / rd_norm;
float zd = rd[2] / rd_norm;
float xc = s[0];
float yc = s[1];
float zc = s[2];
float r = s[3];
float rd_dot_r0_m_c = xd * (x0 - xc) + yd * (y0 - yc) + zd * (z0 - zc);
float r0_m_c_dot = (x0 - xc) * (x0 - xc) + (y0 - yc) * (y0 - yc) + (z0 - zc) * (z0 - zc);
float radicand = rd_dot_r0_m_c * rd_dot_r0_m_c - r0_m_c_dot + r * r;
float t1 = -rd_dot_r0_m_c + sqrt(radicand);
float t2 = -rd_dot_r0_m_c - sqrt(radicand);
return radicand < 0 ? bg_val : (t1 < t2 ? t1 : t2);
}
__global__ void icgCudaRenderRender(float* img, int length, int img_height, int img_width,
float cam_x, float cam_y, float cam_z, float step_x, float step_y,
int n_planes, const float* planes, int n_spheres, const float* spheres,
float bg_val) {
CUDA_KERNEL_LOOP(img_idx, length) {
int h = img_height - img_idx / img_width - 1;
int w = img_idx % img_width;
float r0[3];
r0[0] = cam_x + w * step_x;
r0[1] = cam_y + h * step_y;
r0[2] = cam_z;
float rd[3];
rd[0] = 0;
rd[1] = 0;
rd[2] = 1;
float val = bg_val;
for(int pl_idx = 0; pl_idx < n_planes; ++pl_idx) {
float pl_val = icgCudaRenderInterLinePlane(r0, rd, planes + 3 * 4 * pl_idx, bg_val);
val = pl_val < val ? pl_val : val;
}
for(int sp_idx = 0; sp_idx < n_spheres; ++sp_idx) {
float sp_val = icgCudaRenderInterLineSphere(r0, rd, spheres + 4 * sp_idx, bg_val);
val = sp_val < val ? sp_val : val;
}
img[img_idx] = val < 0 ? 0 : val;
}
}
static int icgrender_IcgCudaRender_render(lua_State *L)
{
THCState* state = getCutorchState(L);
THCudaTensor* planes = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* spheres = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
double cam_x = luaT_getfieldchecknumber(L, 1, "cam_x");
double cam_y = luaT_getfieldchecknumber(L, 1, "cam_y");
double cam_z = luaT_getfieldchecknumber(L, 1, "cam_z");
long img_height = luaT_getfieldchecknumber(L, 1, "img_height");
long img_width = luaT_getfieldchecknumber(L, 1, "img_width");
double step_x = luaT_getfieldchecknumber(L, 1, "step_x");
double step_y = luaT_getfieldchecknumber(L, 1, "step_y");
double bg_val = luaT_getfieldchecknumber(L, 1, "bg_val");
THCudaTensor* img = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "img", "torch.CudaTensor");
THCudaTensor_resize2d(state, img, img_height, img_width);
long n_planes = 0;
if(THCudaTensor_nDimension(state, planes) > 0) {
n_planes = THCudaTensor_size(state, planes, 0);
}
long n_spheres = 0;
if(THCudaTensor_nDimension(state, spheres) > 0) {
n_spheres = THCudaTensor_size(state, spheres, 0);
}
long length = img_height * img_width;
icgCudaRenderRender<<<GET_BLOCKS(length), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, img), length, img_height, img_width,
cam_x, cam_y, cam_z, step_x, step_y,
n_planes, THCudaTensor_data(state, planes),
n_spheres, THCudaTensor_data(state, spheres),
bg_val);
THCudaCheck(cudaGetLastError());
return 1;
}
static const struct luaL_Reg icgrender_IcgCudaRender__ [] = {
{"IcgCudaRender_render", icgrender_IcgCudaRender_render},
{NULL, NULL}
};
void icgrender_IcgCudaRender_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
#if LUA_VERSION_NUM == 501
luaL_register(L, NULL, icgrender_IcgCudaRender__);
#else
luaL_setfuncs(L, icgrender_IcgCudaRender__, 0);
#endif
// luaT_registeratname(L, icgrender_IcgRender__, "icgrender");
// lua_pop(L, 1);
}
|
2721652a3655f1ba5b319ac38c012842d8ca1279.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "threshold_one.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vec = NULL;
hipMalloc(&vec, XSIZE*YSIZE*sizeof(float));
float *vec_thres = NULL;
hipMalloc(&vec_thres, XSIZE*YSIZE*sizeof(float));
int *bin = NULL;
hipMalloc(&bin, XSIZE*YSIZE*sizeof(int));
const int k_bin = 1;
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(threshold_one, dim3(gridBlock), dim3(threadBlock), 0, 0, vec, vec_thres, bin, k_bin, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(threshold_one, dim3(gridBlock), dim3(threadBlock), 0, 0, vec, vec_thres, bin, k_bin, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(threshold_one, dim3(gridBlock), dim3(threadBlock), 0, 0, vec, vec_thres, bin, k_bin, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2721652a3655f1ba5b319ac38c012842d8ca1279.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "threshold_one.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vec = NULL;
cudaMalloc(&vec, XSIZE*YSIZE*sizeof(float));
float *vec_thres = NULL;
cudaMalloc(&vec_thres, XSIZE*YSIZE*sizeof(float));
int *bin = NULL;
cudaMalloc(&bin, XSIZE*YSIZE*sizeof(int));
const int k_bin = 1;
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
threshold_one<<<gridBlock,threadBlock>>>(vec,vec_thres,bin,k_bin,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
threshold_one<<<gridBlock,threadBlock>>>(vec,vec_thres,bin,k_bin,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
threshold_one<<<gridBlock,threadBlock>>>(vec,vec_thres,bin,k_bin,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7a61152d310f2a626aa646daef29471af517b301.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__+2),2) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__+2),2) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-2);
int __temp_1__;
__temp_1__ = __iter_5__+(-2);
float __temp_2__;
__temp_2__ = (2 * __tilevar_2__[__temp_1__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(0-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__+(-2);
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (4 * __tilevar_2__[__temp_4__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(0-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__+(-2);
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (5 * __tilevar_2__[__temp_8__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(0-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__+(-2);
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (4 * __tilevar_2__[__temp_12__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(0-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(-2);
int __temp_16__;
__temp_16__ = __iter_5__+(2);
float __temp_17__;
__temp_17__ = (2 * __tilevar_2__[__temp_16__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(0-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
int __temp_19__;
__temp_19__ = __iter_4__+(-1);
int __temp_20__;
__temp_20__ = __iter_5__+(-2);
float __temp_21__;
__temp_21__ = (4 * __tilevar_2__[__temp_20__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_19__+(0-(__iter_1__+0)))]);
float __temp_22__;
__temp_22__ = (__temp_18__ + __temp_21__);
int __temp_23__;
__temp_23__ = __iter_4__+(-1);
int __temp_24__;
__temp_24__ = __iter_5__+(-1);
float __temp_25__;
__temp_25__ = (9 * __tilevar_2__[__temp_24__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_23__+(0-(__iter_1__+0)))]);
float __temp_26__;
__temp_26__ = (__temp_22__ + __temp_25__);
int __temp_27__;
__temp_27__ = __iter_4__+(-1);
int __temp_28__;
__temp_28__ = __iter_5__;
float __temp_29__;
__temp_29__ = (12 * __tilevar_2__[__temp_28__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_27__+(0-(__iter_1__+0)))]);
float __temp_30__;
__temp_30__ = (__temp_26__ + __temp_29__);
int __temp_31__;
__temp_31__ = __iter_4__+(-1);
int __temp_32__;
__temp_32__ = __iter_5__+(1);
float __temp_33__;
__temp_33__ = (9 * __tilevar_2__[__temp_32__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_31__+(0-(__iter_1__+0)))]);
float __temp_34__;
__temp_34__ = (__temp_30__ + __temp_33__);
int __temp_35__;
__temp_35__ = __iter_4__+(-1);
int __temp_36__;
__temp_36__ = __iter_5__+(2);
float __temp_37__;
__temp_37__ = (4 * __tilevar_2__[__temp_36__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_35__+(0-(__iter_1__+0)))]);
float __temp_38__;
__temp_38__ = (__temp_34__ + __temp_37__);
int __temp_39__;
__temp_39__ = __iter_4__;
int __temp_40__;
__temp_40__ = __iter_5__+(-2);
float __temp_41__;
__temp_41__ = (5 * __tilevar_2__[__temp_40__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_39__+(0-(__iter_1__+0)))]);
float __temp_42__;
__temp_42__ = (__temp_38__ + __temp_41__);
int __temp_43__;
__temp_43__ = __iter_4__;
int __temp_44__;
__temp_44__ = __iter_5__+(-1);
float __temp_45__;
__temp_45__ = (12 * __tilevar_2__[__temp_44__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_43__+(0-(__iter_1__+0)))]);
float __temp_46__;
__temp_46__ = (__temp_42__ + __temp_45__);
int __temp_47__;
__temp_47__ = __iter_4__;
int __temp_48__;
__temp_48__ = __iter_5__;
float __temp_49__;
__temp_49__ = (15 * __tilevar_2__[__temp_48__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_47__+(0-(__iter_1__+0)))]);
float __temp_50__;
__temp_50__ = (__temp_46__ + __temp_49__);
int __temp_51__;
__temp_51__ = __iter_4__;
int __temp_52__;
__temp_52__ = __iter_5__+(1);
float __temp_53__;
__temp_53__ = (12 * __tilevar_2__[__temp_52__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_51__+(0-(__iter_1__+0)))]);
float __temp_54__;
__temp_54__ = (__temp_50__ + __temp_53__);
int __temp_55__;
__temp_55__ = __iter_4__;
int __temp_56__;
__temp_56__ = __iter_5__+(2);
float __temp_57__;
__temp_57__ = (5 * __tilevar_2__[__temp_56__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_55__+(0-(__iter_1__+0)))]);
float __temp_58__;
__temp_58__ = (__temp_54__ + __temp_57__);
int __temp_59__;
__temp_59__ = __iter_4__+(1);
int __temp_60__;
__temp_60__ = __iter_5__+(-2);
float __temp_61__;
__temp_61__ = (4 * __tilevar_2__[__temp_60__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_59__+(0-(__iter_1__+0)))]);
float __temp_62__;
__temp_62__ = (__temp_58__ + __temp_61__);
int __temp_63__;
__temp_63__ = __iter_4__+(1);
int __temp_64__;
__temp_64__ = __iter_5__+(-1);
float __temp_65__;
__temp_65__ = (9 * __tilevar_2__[__temp_64__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_63__+(0-(__iter_1__+0)))]);
float __temp_66__;
__temp_66__ = (__temp_62__ + __temp_65__);
int __temp_67__;
__temp_67__ = __iter_4__+(1);
int __temp_68__;
__temp_68__ = __iter_5__;
float __temp_69__;
__temp_69__ = (12 * __tilevar_2__[__temp_68__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_67__+(0-(__iter_1__+0)))]);
float __temp_70__;
__temp_70__ = (__temp_66__ + __temp_69__);
int __temp_71__;
__temp_71__ = __iter_4__+(1);
int __temp_72__;
__temp_72__ = __iter_5__+(1);
float __temp_73__;
__temp_73__ = (9 * __tilevar_2__[__temp_72__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_71__+(0-(__iter_1__+0)))]);
float __temp_74__;
__temp_74__ = (__temp_70__ + __temp_73__);
int __temp_75__;
__temp_75__ = __iter_4__+(1);
int __temp_76__;
__temp_76__ = __iter_5__+(2);
float __temp_77__;
__temp_77__ = (4 * __tilevar_2__[__temp_76__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_75__+(0-(__iter_1__+0)))]);
float __temp_78__;
__temp_78__ = (__temp_74__ + __temp_77__);
int __temp_79__;
__temp_79__ = __iter_4__+(2);
int __temp_80__;
__temp_80__ = __iter_5__+(-2);
float __temp_81__;
__temp_81__ = (2 * __tilevar_2__[__temp_80__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_79__+(0-(__iter_1__+0)))]);
float __temp_82__;
__temp_82__ = (__temp_78__ + __temp_81__);
int __temp_83__;
__temp_83__ = __iter_4__+(2);
int __temp_84__;
__temp_84__ = __iter_5__+(-1);
float __temp_85__;
__temp_85__ = (4 * __tilevar_2__[__temp_84__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_83__+(0-(__iter_1__+0)))]);
float __temp_86__;
__temp_86__ = (__temp_82__ + __temp_85__);
int __temp_87__;
__temp_87__ = __iter_4__+(2);
int __temp_88__;
__temp_88__ = __iter_5__;
float __temp_89__;
__temp_89__ = (5 * __tilevar_2__[__temp_88__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_87__+(0-(__iter_1__+0)))]);
float __temp_90__;
__temp_90__ = (__temp_86__ + __temp_89__);
int __temp_91__;
__temp_91__ = __iter_4__+(2);
int __temp_92__;
__temp_92__ = __iter_5__+(1);
float __temp_93__;
__temp_93__ = (4 * __tilevar_2__[__temp_92__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_91__+(0-(__iter_1__+0)))]);
float __temp_94__;
__temp_94__ = (__temp_90__ + __temp_93__);
int __temp_95__;
__temp_95__ = __iter_4__+(2);
int __temp_96__;
__temp_96__ = __iter_5__+(2);
float __temp_97__;
__temp_97__ = (2 * __tilevar_2__[__temp_96__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_95__+(0-(__iter_1__+0)))]);
float __temp_98__;
__temp_98__ = (__temp_94__ + __temp_97__);
float __temp_99__;
__temp_99__ = (__temp_98__ / 159);
__tilevar_3__[__iter_5__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+2)))] = __temp_99__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__+2),2) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__+2),2) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3)) ){
if (__iter_6__ < (FORMA_MAX((__iter_1__+2),2)+4) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3))-4) || __iter_7__ < (FORMA_MAX((__iter_0__+2),2)+4) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))-4)) {
__copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+2)))];
}
}
}
__syncthreads();
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__+4),2) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__+4),2) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3)) ){
int __temp_150__;
__temp_150__ = __iter_12__+(-2);
int __temp_151__;
__temp_151__ = __iter_13__+(-2);
float __temp_152__;
__temp_152__ = (2 * __tilevar_3__[__temp_151__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_150__+(0-(__iter_1__+2)))]);
int __temp_153__;
__temp_153__ = __iter_12__+(-2);
int __temp_154__;
__temp_154__ = __iter_13__+(-1);
float __temp_155__;
__temp_155__ = (4 * __tilevar_3__[__temp_154__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_153__+(0-(__iter_1__+2)))]);
float __temp_156__;
__temp_156__ = (__temp_152__ + __temp_155__);
int __temp_157__;
__temp_157__ = __iter_12__+(-2);
int __temp_158__;
__temp_158__ = __iter_13__;
float __temp_159__;
__temp_159__ = (5 * __tilevar_3__[__temp_158__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_157__+(0-(__iter_1__+2)))]);
float __temp_160__;
__temp_160__ = (__temp_156__ + __temp_159__);
int __temp_161__;
__temp_161__ = __iter_12__+(-2);
int __temp_162__;
__temp_162__ = __iter_13__+(1);
float __temp_163__;
__temp_163__ = (4 * __tilevar_3__[__temp_162__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_161__+(0-(__iter_1__+2)))]);
float __temp_164__;
__temp_164__ = (__temp_160__ + __temp_163__);
int __temp_165__;
__temp_165__ = __iter_12__+(-2);
int __temp_166__;
__temp_166__ = __iter_13__+(2);
float __temp_167__;
__temp_167__ = (2 * __tilevar_3__[__temp_166__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_165__+(0-(__iter_1__+2)))]);
float __temp_168__;
__temp_168__ = (__temp_164__ + __temp_167__);
int __temp_169__;
__temp_169__ = __iter_12__+(-1);
int __temp_170__;
__temp_170__ = __iter_13__+(-2);
float __temp_171__;
__temp_171__ = (4 * __tilevar_3__[__temp_170__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_169__+(0-(__iter_1__+2)))]);
float __temp_172__;
__temp_172__ = (__temp_168__ + __temp_171__);
int __temp_173__;
__temp_173__ = __iter_12__+(-1);
int __temp_174__;
__temp_174__ = __iter_13__+(-1);
float __temp_175__;
__temp_175__ = (9 * __tilevar_3__[__temp_174__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_173__+(0-(__iter_1__+2)))]);
float __temp_176__;
__temp_176__ = (__temp_172__ + __temp_175__);
int __temp_177__;
__temp_177__ = __iter_12__+(-1);
int __temp_178__;
__temp_178__ = __iter_13__;
float __temp_179__;
__temp_179__ = (12 * __tilevar_3__[__temp_178__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_177__+(0-(__iter_1__+2)))]);
float __temp_180__;
__temp_180__ = (__temp_176__ + __temp_179__);
int __temp_181__;
__temp_181__ = __iter_12__+(-1);
int __temp_182__;
__temp_182__ = __iter_13__+(1);
float __temp_183__;
__temp_183__ = (9 * __tilevar_3__[__temp_182__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_181__+(0-(__iter_1__+2)))]);
float __temp_184__;
__temp_184__ = (__temp_180__ + __temp_183__);
int __temp_185__;
__temp_185__ = __iter_12__+(-1);
int __temp_186__;
__temp_186__ = __iter_13__+(2);
float __temp_187__;
__temp_187__ = (4 * __tilevar_3__[__temp_186__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_185__+(0-(__iter_1__+2)))]);
float __temp_188__;
__temp_188__ = (__temp_184__ + __temp_187__);
int __temp_189__;
__temp_189__ = __iter_12__;
int __temp_190__;
__temp_190__ = __iter_13__+(-2);
float __temp_191__;
__temp_191__ = (5 * __tilevar_3__[__temp_190__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_189__+(0-(__iter_1__+2)))]);
float __temp_192__;
__temp_192__ = (__temp_188__ + __temp_191__);
int __temp_193__;
__temp_193__ = __iter_12__;
int __temp_194__;
__temp_194__ = __iter_13__+(-1);
float __temp_195__;
__temp_195__ = (12 * __tilevar_3__[__temp_194__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_193__+(0-(__iter_1__+2)))]);
float __temp_196__;
__temp_196__ = (__temp_192__ + __temp_195__);
int __temp_197__;
__temp_197__ = __iter_12__;
int __temp_198__;
__temp_198__ = __iter_13__;
float __temp_199__;
__temp_199__ = (15 * __tilevar_3__[__temp_198__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_197__+(0-(__iter_1__+2)))]);
float __temp_200__;
__temp_200__ = (__temp_196__ + __temp_199__);
int __temp_201__;
__temp_201__ = __iter_12__;
int __temp_202__;
__temp_202__ = __iter_13__+(1);
float __temp_203__;
__temp_203__ = (12 * __tilevar_3__[__temp_202__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_201__+(0-(__iter_1__+2)))]);
float __temp_204__;
__temp_204__ = (__temp_200__ + __temp_203__);
int __temp_205__;
__temp_205__ = __iter_12__;
int __temp_206__;
__temp_206__ = __iter_13__+(2);
float __temp_207__;
__temp_207__ = (5 * __tilevar_3__[__temp_206__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_205__+(0-(__iter_1__+2)))]);
float __temp_208__;
__temp_208__ = (__temp_204__ + __temp_207__);
int __temp_209__;
__temp_209__ = __iter_12__+(1);
int __temp_210__;
__temp_210__ = __iter_13__+(-2);
float __temp_211__;
__temp_211__ = (4 * __tilevar_3__[__temp_210__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_209__+(0-(__iter_1__+2)))]);
float __temp_212__;
__temp_212__ = (__temp_208__ + __temp_211__);
int __temp_213__;
__temp_213__ = __iter_12__+(1);
int __temp_214__;
__temp_214__ = __iter_13__+(-1);
float __temp_215__;
__temp_215__ = (9 * __tilevar_3__[__temp_214__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_213__+(0-(__iter_1__+2)))]);
float __temp_216__;
__temp_216__ = (__temp_212__ + __temp_215__);
int __temp_217__;
__temp_217__ = __iter_12__+(1);
int __temp_218__;
__temp_218__ = __iter_13__;
float __temp_219__;
__temp_219__ = (12 * __tilevar_3__[__temp_218__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_217__+(0-(__iter_1__+2)))]);
float __temp_220__;
__temp_220__ = (__temp_216__ + __temp_219__);
int __temp_221__;
__temp_221__ = __iter_12__+(1);
int __temp_222__;
__temp_222__ = __iter_13__+(1);
float __temp_223__;
__temp_223__ = (9 * __tilevar_3__[__temp_222__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_221__+(0-(__iter_1__+2)))]);
float __temp_224__;
__temp_224__ = (__temp_220__ + __temp_223__);
int __temp_225__;
__temp_225__ = __iter_12__+(1);
int __temp_226__;
__temp_226__ = __iter_13__+(2);
float __temp_227__;
__temp_227__ = (4 * __tilevar_3__[__temp_226__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_225__+(0-(__iter_1__+2)))]);
float __temp_228__;
__temp_228__ = (__temp_224__ + __temp_227__);
int __temp_229__;
__temp_229__ = __iter_12__+(2);
int __temp_230__;
__temp_230__ = __iter_13__+(-2);
float __temp_231__;
__temp_231__ = (2 * __tilevar_3__[__temp_230__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_229__+(0-(__iter_1__+2)))]);
float __temp_232__;
__temp_232__ = (__temp_228__ + __temp_231__);
int __temp_233__;
__temp_233__ = __iter_12__+(2);
int __temp_234__;
__temp_234__ = __iter_13__+(-1);
float __temp_235__;
__temp_235__ = (4 * __tilevar_3__[__temp_234__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_233__+(0-(__iter_1__+2)))]);
float __temp_236__;
__temp_236__ = (__temp_232__ + __temp_235__);
int __temp_237__;
__temp_237__ = __iter_12__+(2);
int __temp_238__;
__temp_238__ = __iter_13__;
float __temp_239__;
__temp_239__ = (5 * __tilevar_3__[__temp_238__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_237__+(0-(__iter_1__+2)))]);
float __temp_240__;
__temp_240__ = (__temp_236__ + __temp_239__);
int __temp_241__;
__temp_241__ = __iter_12__+(2);
int __temp_242__;
__temp_242__ = __iter_13__+(1);
float __temp_243__;
__temp_243__ = (4 * __tilevar_3__[__temp_242__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_241__+(0-(__iter_1__+2)))]);
float __temp_244__;
__temp_244__ = (__temp_240__ + __temp_243__);
int __temp_245__;
__temp_245__ = __iter_12__+(2);
int __temp_246__;
__temp_246__ = __iter_13__+(2);
float __temp_247__;
__temp_247__ = (2 * __tilevar_3__[__temp_246__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_245__+(0-(__iter_1__+2)))]);
float __temp_248__;
__temp_248__ = (__temp_244__ + __temp_247__);
float __temp_249__;
__temp_249__ = (__temp_248__ / 159);
__tilevar_4__[__iter_13__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+4)))] = __temp_249__;
}
}
int __iter_14__;
__iter_14__ = FORMA_MAX((__iter_1__+4),2) + (int)(threadIdx.y) ;
if( __iter_14__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3)) ){
int __iter_15__;
__iter_15__ = FORMA_MAX((__iter_0__+4),2) + (int)(threadIdx.x) ;
if( __iter_15__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3)) ){
if (__iter_14__ < (FORMA_MAX((__iter_1__+4),2)+4) || __iter_14__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3))-4) || __iter_15__ < (FORMA_MAX((__iter_0__+4),2)+4) || __iter_15__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))-4)) {
__copy_arr_1__[__iter_15__+(M-0)*(__iter_14__)] = __tilevar_4__[__iter_15__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_14__+(0-(__iter_1__+4)))];
}
}
}
__syncthreads();
int __iter_20__;
__iter_20__ = FORMA_MAX((__iter_1__+6),2) + (int)(threadIdx.y) ;
if( __iter_20__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3)) ){
int __iter_21__;
__iter_21__ = FORMA_MAX((__iter_0__+6),2) + (int)(threadIdx.x) ;
if( __iter_21__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3)) ){
float __temp_300__;
__temp_300__ = (2 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_301__;
__temp_301__ = (4 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_302__;
__temp_302__ = (__temp_300__ + __temp_301__);
float __temp_303__;
__temp_303__ = (5 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_304__;
__temp_304__ = (__temp_302__ + __temp_303__);
float __temp_305__;
__temp_305__ = (4 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_306__;
__temp_306__ = (__temp_304__ + __temp_305__);
float __temp_307__;
__temp_307__ = (2 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_308__;
__temp_308__ = (__temp_306__ + __temp_307__);
float __temp_309__;
__temp_309__ = (4 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_310__;
__temp_310__ = (__temp_308__ + __temp_309__);
float __temp_311__;
__temp_311__ = (9 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_312__;
__temp_312__ = (__temp_310__ + __temp_311__);
float __temp_313__;
__temp_313__ = (12 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_314__;
__temp_314__ = (__temp_312__ + __temp_313__);
float __temp_315__;
__temp_315__ = (9 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_316__;
__temp_316__ = (__temp_314__ + __temp_315__);
float __temp_317__;
__temp_317__ = (4 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_318__;
__temp_318__ = (__temp_316__ + __temp_317__);
float __temp_319__;
__temp_319__ = (5 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_320__;
__temp_320__ = (__temp_318__ + __temp_319__);
float __temp_321__;
__temp_321__ = (12 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_322__;
__temp_322__ = (__temp_320__ + __temp_321__);
float __temp_323__;
__temp_323__ = (15 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_324__;
__temp_324__ = (__temp_322__ + __temp_323__);
float __temp_325__;
__temp_325__ = (12 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_326__;
__temp_326__ = (__temp_324__ + __temp_325__);
float __temp_327__;
__temp_327__ = (5 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_328__;
__temp_328__ = (__temp_326__ + __temp_327__);
float __temp_329__;
__temp_329__ = (4 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_330__;
__temp_330__ = (__temp_328__ + __temp_329__);
float __temp_331__;
__temp_331__ = (9 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_332__;
__temp_332__ = (__temp_330__ + __temp_331__);
float __temp_333__;
__temp_333__ = (12 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_334__;
__temp_334__ = (__temp_332__ + __temp_333__);
float __temp_335__;
__temp_335__ = (9 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_336__;
__temp_336__ = (__temp_334__ + __temp_335__);
float __temp_337__;
__temp_337__ = (4 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_338__;
__temp_338__ = (__temp_336__ + __temp_337__);
float __temp_339__;
__temp_339__ = (2 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_340__;
__temp_340__ = (__temp_338__ + __temp_339__);
float __temp_341__;
__temp_341__ = (4 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_342__;
__temp_342__ = (__temp_340__ + __temp_341__);
float __temp_343__;
__temp_343__ = (5 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_344__;
__temp_344__ = (__temp_342__ + __temp_343__);
float __temp_345__;
__temp_345__ = (4 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_346__;
__temp_346__ = (__temp_344__ + __temp_345__);
float __temp_347__;
__temp_347__ = (2 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_348__;
__temp_348__ = (__temp_346__ + __temp_347__);
float __temp_349__;
__temp_349__ = (__temp_348__ / 159);
__tilevar_5__[__iter_21__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+6)))] = __temp_349__;
}
}
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__+6),2) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__+6),2) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3)) ){
if (__iter_22__ < (FORMA_MAX((__iter_1__+6),2)+4) || __iter_22__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3))-4) || __iter_23__ < (FORMA_MAX((__iter_0__+6),2)+4) || __iter_23__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))-4)) {
__copy_arr_2__[__iter_23__+(M-0)*(__iter_22__)] = __tilevar_5__[__iter_23__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+6)))];
}
}
}
__syncthreads();
int __iter_28__;
__iter_28__ = FORMA_MAX((__iter_1__+8),2) + (int)(threadIdx.y) ;
if( __iter_28__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(N-3)) ){
int __iter_29__;
__iter_29__ = FORMA_MAX((__iter_0__+8),2) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3)) ){
float __temp_400__;
__temp_400__ = (2 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_401__;
__temp_401__ = (4 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_402__;
__temp_402__ = (__temp_400__ + __temp_401__);
float __temp_403__;
__temp_403__ = (5 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_404__;
__temp_404__ = (__temp_402__ + __temp_403__);
float __temp_405__;
__temp_405__ = (4 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_406__;
__temp_406__ = (__temp_404__ + __temp_405__);
float __temp_407__;
__temp_407__ = (2 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_408__;
__temp_408__ = (__temp_406__ + __temp_407__);
float __temp_409__;
__temp_409__ = (4 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_410__;
__temp_410__ = (__temp_408__ + __temp_409__);
float __temp_411__;
__temp_411__ = (9 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_412__;
__temp_412__ = (__temp_410__ + __temp_411__);
float __temp_413__;
__temp_413__ = (12 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_414__;
__temp_414__ = (__temp_412__ + __temp_413__);
float __temp_415__;
__temp_415__ = (9 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_416__;
__temp_416__ = (__temp_414__ + __temp_415__);
float __temp_417__;
__temp_417__ = (4 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_418__;
__temp_418__ = (__temp_416__ + __temp_417__);
float __temp_419__;
__temp_419__ = (5 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_420__;
__temp_420__ = (__temp_418__ + __temp_419__);
float __temp_421__;
__temp_421__ = (12 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_422__;
__temp_422__ = (__temp_420__ + __temp_421__);
float __temp_423__;
__temp_423__ = (15 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_424__;
__temp_424__ = (__temp_422__ + __temp_423__);
float __temp_425__;
__temp_425__ = (12 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_426__;
__temp_426__ = (__temp_424__ + __temp_425__);
float __temp_427__;
__temp_427__ = (5 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_428__;
__temp_428__ = (__temp_426__ + __temp_427__);
float __temp_429__;
__temp_429__ = (4 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_430__;
__temp_430__ = (__temp_428__ + __temp_429__);
float __temp_431__;
__temp_431__ = (9 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_432__;
__temp_432__ = (__temp_430__ + __temp_431__);
float __temp_433__;
__temp_433__ = (12 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_434__;
__temp_434__ = (__temp_432__ + __temp_433__);
float __temp_435__;
__temp_435__ = (9 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_436__;
__temp_436__ = (__temp_434__ + __temp_435__);
float __temp_437__;
__temp_437__ = (4 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_438__;
__temp_438__ = (__temp_436__ + __temp_437__);
float __temp_439__;
__temp_439__ = (2 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_440__;
__temp_440__ = (__temp_438__ + __temp_439__);
float __temp_441__;
__temp_441__ = (4 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_442__;
__temp_442__ = (__temp_440__ + __temp_441__);
float __temp_443__;
__temp_443__ = (5 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_444__;
__temp_444__ = (__temp_442__ + __temp_443__);
float __temp_445__;
__temp_445__ = (4 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_446__;
__temp_446__ = (__temp_444__ + __temp_445__);
float __temp_447__;
__temp_447__ = (2 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_448__;
__temp_448__ = (__temp_446__ + __temp_447__);
float __temp_449__;
__temp_449__ = (__temp_448__ / 159);
__var_1__[__iter_29__+(M-0)*(__iter_28__)] = __temp_449__;
}
}
}
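/*
 * The 5x5 stencil applied by this generated kernel (and repeated in the
 * kernels below) uses the weights
 *     2  4  5  4  2
 *     4  9 12  9  4
 *     5 12 15 12  5
 *     4  9 12  9  4
 *     2  4  5  4  2
 * divided by their sum 159 -- the Gaussian smoothing mask commonly quoted in
 * Canny edge-detector implementations -- so each chained stage in this Forma
 * output is one Gaussian blur pass over the interior of the N x M grid.
 */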
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
return SMemSize;
}
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __copy_arr_0__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_8__;
__iter_8__ = FORMA_MAX((__iter_1__+(-2)),0) + (int)(threadIdx.y) ;
for( ; __iter_8__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-(-1)),(N-1)) ; __iter_8__ += (int)(blockDim.y) ){
int __iter_9__;
__iter_9__ = FORMA_MAX((__iter_0__+(-2)),0) + (int)(threadIdx.x) ;
for( ; __iter_9__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-(-1)),(M-1)) ; __iter_9__ += (int)(blockDim.x) ){
if (__iter_8__ < (FORMA_MAX((__iter_1__+2),2)+2) || __iter_8__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3))-2) || __iter_9__ < (FORMA_MAX((__iter_0__+2),2)+2) || __iter_9__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))-2)) {
__tilevar_2__[__iter_9__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_8__+(0-(__iter_1__-2)))] = input[__iter_9__+(M-0)*(__iter_8__)];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX(__iter_1__,2) + (int)(threadIdx.y) ;
for( ; __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-3)) ; __iter_10__ += (int)(blockDim.y) ){
int __iter_11__;
__iter_11__ = FORMA_MAX(__iter_0__,2) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-3)) ){
if (__iter_10__ < FORMA_MAX((__iter_1__+2),2) || __iter_10__ > FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3)) || __iter_11__ < FORMA_MAX((__iter_0__+2),2) || __iter_11__ > FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
float __temp_100__;
__temp_100__ = (2 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_101__;
__temp_101__ = (4 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_102__;
__temp_102__ = (__temp_100__ + __temp_101__);
float __temp_103__;
__temp_103__ = (5 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_104__;
__temp_104__ = (__temp_102__ + __temp_103__);
float __temp_105__;
__temp_105__ = (4 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_106__;
__temp_106__ = (__temp_104__ + __temp_105__);
float __temp_107__;
__temp_107__ = (2 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_108__;
__temp_108__ = (__temp_106__ + __temp_107__);
float __temp_109__;
__temp_109__ = (4 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_110__;
__temp_110__ = (__temp_108__ + __temp_109__);
float __temp_111__;
__temp_111__ = (9 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_112__;
__temp_112__ = (__temp_110__ + __temp_111__);
float __temp_113__;
__temp_113__ = (12 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_114__;
__temp_114__ = (__temp_112__ + __temp_113__);
float __temp_115__;
__temp_115__ = (9 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_116__;
__temp_116__ = (__temp_114__ + __temp_115__);
float __temp_117__;
__temp_117__ = (4 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_118__;
__temp_118__ = (__temp_116__ + __temp_117__);
float __temp_119__;
__temp_119__ = (5 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_120__;
__temp_120__ = (__temp_118__ + __temp_119__);
float __temp_121__;
__temp_121__ = (12 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_122__;
__temp_122__ = (__temp_120__ + __temp_121__);
float __temp_123__;
__temp_123__ = (15 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_124__;
__temp_124__ = (__temp_122__ + __temp_123__);
float __temp_125__;
__temp_125__ = (12 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_126__;
__temp_126__ = (__temp_124__ + __temp_125__);
float __temp_127__;
__temp_127__ = (5 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_128__;
__temp_128__ = (__temp_126__ + __temp_127__);
float __temp_129__;
__temp_129__ = (4 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_130__;
__temp_130__ = (__temp_128__ + __temp_129__);
float __temp_131__;
__temp_131__ = (9 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_132__;
__temp_132__ = (__temp_130__ + __temp_131__);
float __temp_133__;
__temp_133__ = (12 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_134__;
__temp_134__ = (__temp_132__ + __temp_133__);
float __temp_135__;
__temp_135__ = (9 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_136__;
__temp_136__ = (__temp_134__ + __temp_135__);
float __temp_137__;
__temp_137__ = (4 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_138__;
__temp_138__ = (__temp_136__ + __temp_137__);
float __temp_139__;
__temp_139__ = (2 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_140__;
__temp_140__ = (__temp_138__ + __temp_139__);
float __temp_141__;
__temp_141__ = (4 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_142__;
__temp_142__ = (__temp_140__ + __temp_141__);
float __temp_143__;
__temp_143__ = (5 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_144__;
__temp_144__ = (__temp_142__ + __temp_143__);
float __temp_145__;
__temp_145__ = (4 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_146__;
__temp_146__ = (__temp_144__ + __temp_145__);
float __temp_147__;
__temp_147__ = (2 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_148__;
__temp_148__ = (__temp_146__ + __temp_147__);
float __temp_149__;
__temp_149__ = (__temp_148__ / 159);
__copy_arr_0__[__iter_11__+(M-0)*(__iter_10__)] = __temp_149__;
}
}
}
}
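/* Helper: dynamic shared-memory footprint of __kernel___forma_kernel__1__ for a given
   block size -- one (blockDim.y+4) x (blockDim.x+4) float tile; the +4 on each axis is
   the 2-cell halo on every side required by the 5x5 stencil. The same helper pattern is
   repeated below for kernels 2, 3 and 4. */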
int __blockSizeToSMemSize___kernel___forma_kernel__1__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
return SMemSize;
}
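/* Stage-2 halo kernel: reloads the stage-1 values from __copy_arr_0__ into a shared
   tile and re-applies the 5x5 stencil, but only for cells near the block edges (the
   interior cells appear to be advanced entirely inside __kernel___forma_kernel__0__).
   The stencil weights (2 4 5 4 2 / 4 9 12 9 4 / 5 12 15 12 5 / 4 9 12 9 4 / 2 4 5 4 2)
   sum to 159, i.e. the normalized 5x5 Gaussian smoothing kernel familiar from Canny
   edge detection. Stage-2 results are written to __copy_arr_1__. */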
__global__ void __kernel___forma_kernel__2__(float * __restrict__ __copy_arr_0__, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __copy_arr_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__+(-2)),0) + (int)(threadIdx.y) ;
for( ; __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-(-1)),(N-1)) ; __iter_16__ += (int)(blockDim.y) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__+(-2)),0) + (int)(threadIdx.x) ;
for( ; __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-(-1)),(M-1)) ; __iter_17__ += (int)(blockDim.x) ){
if (__iter_16__ < (FORMA_MAX((__iter_1__+4),2)+2) || __iter_16__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3))-2) || __iter_17__ < (FORMA_MAX((__iter_0__+4),2)+2) || __iter_17__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))-2)) {
__tilevar_3__[__iter_17__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_16__+(0-(__iter_1__-2)))] = __copy_arr_0__[__iter_17__+(M-0)*(__iter_16__)];
}
}
}
__syncthreads();
int __iter_18__;
__iter_18__ = FORMA_MAX(__iter_1__,2) + (int)(threadIdx.y) ;
for( ; __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-3)) ; __iter_18__ += (int)(blockDim.y) ){
int __iter_19__;
__iter_19__ = FORMA_MAX(__iter_0__,2) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-3)) ){
if (__iter_18__ < FORMA_MAX((__iter_1__+4),2) || __iter_18__ > FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3)) || __iter_19__ < FORMA_MAX((__iter_0__+4),2) || __iter_19__ > FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
float __temp_250__;
__temp_250__ = (2 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_251__;
__temp_251__ = (4 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_252__;
__temp_252__ = (__temp_250__ + __temp_251__);
float __temp_253__;
__temp_253__ = (5 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_254__;
__temp_254__ = (__temp_252__ + __temp_253__);
float __temp_255__;
__temp_255__ = (4 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_256__;
__temp_256__ = (__temp_254__ + __temp_255__);
float __temp_257__;
__temp_257__ = (2 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_258__;
__temp_258__ = (__temp_256__ + __temp_257__);
float __temp_259__;
__temp_259__ = (4 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_260__;
__temp_260__ = (__temp_258__ + __temp_259__);
float __temp_261__;
__temp_261__ = (9 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_262__;
__temp_262__ = (__temp_260__ + __temp_261__);
float __temp_263__;
__temp_263__ = (12 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_264__;
__temp_264__ = (__temp_262__ + __temp_263__);
float __temp_265__;
__temp_265__ = (9 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_266__;
__temp_266__ = (__temp_264__ + __temp_265__);
float __temp_267__;
__temp_267__ = (4 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_268__;
__temp_268__ = (__temp_266__ + __temp_267__);
float __temp_269__;
__temp_269__ = (5 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_270__;
__temp_270__ = (__temp_268__ + __temp_269__);
float __temp_271__;
__temp_271__ = (12 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_272__;
__temp_272__ = (__temp_270__ + __temp_271__);
float __temp_273__;
__temp_273__ = (15 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_274__;
__temp_274__ = (__temp_272__ + __temp_273__);
float __temp_275__;
__temp_275__ = (12 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_276__;
__temp_276__ = (__temp_274__ + __temp_275__);
float __temp_277__;
__temp_277__ = (5 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_278__;
__temp_278__ = (__temp_276__ + __temp_277__);
float __temp_279__;
__temp_279__ = (4 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_280__;
__temp_280__ = (__temp_278__ + __temp_279__);
float __temp_281__;
__temp_281__ = (9 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_282__;
__temp_282__ = (__temp_280__ + __temp_281__);
float __temp_283__;
__temp_283__ = (12 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_284__;
__temp_284__ = (__temp_282__ + __temp_283__);
float __temp_285__;
__temp_285__ = (9 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_286__;
__temp_286__ = (__temp_284__ + __temp_285__);
float __temp_287__;
__temp_287__ = (4 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_288__;
__temp_288__ = (__temp_286__ + __temp_287__);
float __temp_289__;
__temp_289__ = (2 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_290__;
__temp_290__ = (__temp_288__ + __temp_289__);
float __temp_291__;
__temp_291__ = (4 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_292__;
__temp_292__ = (__temp_290__ + __temp_291__);
float __temp_293__;
__temp_293__ = (5 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_294__;
__temp_294__ = (__temp_292__ + __temp_293__);
float __temp_295__;
__temp_295__ = (4 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_296__;
__temp_296__ = (__temp_294__ + __temp_295__);
float __temp_297__;
__temp_297__ = (2 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_298__;
__temp_298__ = (__temp_296__ + __temp_297__);
float __temp_299__;
__temp_299__ = (__temp_298__ / 159);
__copy_arr_1__[__iter_19__+(M-0)*(__iter_18__)] = __temp_299__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__2__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
return SMemSize;
}
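/* Stage-3 halo kernel: identical structure to the stage-2 kernel above, reading
   __copy_arr_1__ and writing the boundary cells of stage 3 to __copy_arr_2__. */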
__global__ void __kernel___forma_kernel__3__(float * __restrict__ __copy_arr_1__, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __copy_arr_2__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_24__;
__iter_24__ = FORMA_MAX((__iter_1__+(-2)),0) + (int)(threadIdx.y) ;
for( ; __iter_24__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-(-1)),(N-1)) ; __iter_24__ += (int)(blockDim.y) ){
int __iter_25__;
__iter_25__ = FORMA_MAX((__iter_0__+(-2)),0) + (int)(threadIdx.x) ;
for( ; __iter_25__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-(-1)),(M-1)) ; __iter_25__ += (int)(blockDim.x) ){
if (__iter_24__ < (FORMA_MAX((__iter_1__+6),2)+2) || __iter_24__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3))-2) || __iter_25__ < (FORMA_MAX((__iter_0__+6),2)+2) || __iter_25__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))-2)) {
__tilevar_4__[__iter_25__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_24__+(0-(__iter_1__-2)))] = __copy_arr_1__[__iter_25__+(M-0)*(__iter_24__)];
}
}
}
__syncthreads();
int __iter_26__;
__iter_26__ = FORMA_MAX(__iter_1__,2) + (int)(threadIdx.y) ;
for( ; __iter_26__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-3)) ; __iter_26__ += (int)(blockDim.y) ){
int __iter_27__;
__iter_27__ = FORMA_MAX(__iter_0__,2) + (int)(threadIdx.x) ;
if( __iter_27__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-3)) ){
if (__iter_26__ < FORMA_MAX((__iter_1__+6),2) || __iter_26__ > FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3)) || __iter_27__ < FORMA_MAX((__iter_0__+6),2) || __iter_27__ > FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
float __temp_350__;
__temp_350__ = (2 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_351__;
__temp_351__ = (4 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_352__;
__temp_352__ = (__temp_350__ + __temp_351__);
float __temp_353__;
__temp_353__ = (5 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_354__;
__temp_354__ = (__temp_352__ + __temp_353__);
float __temp_355__;
__temp_355__ = (4 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_356__;
__temp_356__ = (__temp_354__ + __temp_355__);
float __temp_357__;
__temp_357__ = (2 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_358__;
__temp_358__ = (__temp_356__ + __temp_357__);
float __temp_359__;
__temp_359__ = (4 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_360__;
__temp_360__ = (__temp_358__ + __temp_359__);
float __temp_361__;
__temp_361__ = (9 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_362__;
__temp_362__ = (__temp_360__ + __temp_361__);
float __temp_363__;
__temp_363__ = (12 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_364__;
__temp_364__ = (__temp_362__ + __temp_363__);
float __temp_365__;
__temp_365__ = (9 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_366__;
__temp_366__ = (__temp_364__ + __temp_365__);
float __temp_367__;
__temp_367__ = (4 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_368__;
__temp_368__ = (__temp_366__ + __temp_367__);
float __temp_369__;
__temp_369__ = (5 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_370__;
__temp_370__ = (__temp_368__ + __temp_369__);
float __temp_371__;
__temp_371__ = (12 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_372__;
__temp_372__ = (__temp_370__ + __temp_371__);
float __temp_373__;
__temp_373__ = (15 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_374__;
__temp_374__ = (__temp_372__ + __temp_373__);
float __temp_375__;
__temp_375__ = (12 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_376__;
__temp_376__ = (__temp_374__ + __temp_375__);
float __temp_377__;
__temp_377__ = (5 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_378__;
__temp_378__ = (__temp_376__ + __temp_377__);
float __temp_379__;
__temp_379__ = (4 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_380__;
__temp_380__ = (__temp_378__ + __temp_379__);
float __temp_381__;
__temp_381__ = (9 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_382__;
__temp_382__ = (__temp_380__ + __temp_381__);
float __temp_383__;
__temp_383__ = (12 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_384__;
__temp_384__ = (__temp_382__ + __temp_383__);
float __temp_385__;
__temp_385__ = (9 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_386__;
__temp_386__ = (__temp_384__ + __temp_385__);
float __temp_387__;
__temp_387__ = (4 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_388__;
__temp_388__ = (__temp_386__ + __temp_387__);
float __temp_389__;
__temp_389__ = (2 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_390__;
__temp_390__ = (__temp_388__ + __temp_389__);
float __temp_391__;
__temp_391__ = (4 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_392__;
__temp_392__ = (__temp_390__ + __temp_391__);
float __temp_393__;
__temp_393__ = (5 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_394__;
__temp_394__ = (__temp_392__ + __temp_393__);
float __temp_395__;
__temp_395__ = (4 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_396__;
__temp_396__ = (__temp_394__ + __temp_395__);
float __temp_397__;
__temp_397__ = (2 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_398__;
__temp_398__ = (__temp_396__ + __temp_397__);
float __temp_399__;
__temp_399__ = (__temp_398__ / 159);
__copy_arr_2__[__iter_27__+(M-0)*(__iter_26__)] = __temp_399__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__3__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
return SMemSize;
}
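/* Stage-4 (final) halo kernel: reads __copy_arr_2__ and writes the remaining boundary
   cells of the final result directly into __var_1__. */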
__global__ void __kernel___forma_kernel__4__(float * __restrict__ __copy_arr_2__, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_30__;
__iter_30__ = FORMA_MAX((__iter_1__+(-2)),0) + (int)(threadIdx.y) ;
for( ; __iter_30__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-(-1)),(N-1)) ; __iter_30__ += (int)(blockDim.y) ){
int __iter_31__;
__iter_31__ = FORMA_MAX((__iter_0__+(-2)),0) + (int)(threadIdx.x) ;
for( ; __iter_31__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-(-1)),(M-1)) ; __iter_31__ += (int)(blockDim.x) ){
if (__iter_30__ < (FORMA_MAX((__iter_1__+8),2)+2) || __iter_30__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(N-3))-2) || __iter_31__ < (FORMA_MAX((__iter_0__+8),2)+2) || __iter_31__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))-2)) {
__tilevar_5__[__iter_31__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_30__+(0-(__iter_1__-2)))] = __copy_arr_2__[__iter_31__+(M-0)*(__iter_30__)];
}
}
}
__syncthreads();
int __iter_32__;
__iter_32__ = FORMA_MAX(__iter_1__,2) + (int)(threadIdx.y) ;
for( ; __iter_32__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-3)) ; __iter_32__ += (int)(blockDim.y) ){
int __iter_33__;
__iter_33__ = FORMA_MAX(__iter_0__,2) + (int)(threadIdx.x) ;
if( __iter_33__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-3)) ){
if (__iter_32__ < FORMA_MAX((__iter_1__+8),2) || __iter_32__ > FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(N-3)) || __iter_33__ < FORMA_MAX((__iter_0__+8),2) || __iter_33__ > FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
float __temp_450__;
__temp_450__ = (2 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_451__;
__temp_451__ = (4 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_452__;
__temp_452__ = (__temp_450__ + __temp_451__);
float __temp_453__;
__temp_453__ = (5 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_454__;
__temp_454__ = (__temp_452__ + __temp_453__);
float __temp_455__;
__temp_455__ = (4 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_456__;
__temp_456__ = (__temp_454__ + __temp_455__);
float __temp_457__;
__temp_457__ = (2 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_458__;
__temp_458__ = (__temp_456__ + __temp_457__);
float __temp_459__;
__temp_459__ = (4 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_460__;
__temp_460__ = (__temp_458__ + __temp_459__);
float __temp_461__;
__temp_461__ = (9 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_462__;
__temp_462__ = (__temp_460__ + __temp_461__);
float __temp_463__;
__temp_463__ = (12 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_464__;
__temp_464__ = (__temp_462__ + __temp_463__);
float __temp_465__;
__temp_465__ = (9 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_466__;
__temp_466__ = (__temp_464__ + __temp_465__);
float __temp_467__;
__temp_467__ = (4 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_468__;
__temp_468__ = (__temp_466__ + __temp_467__);
float __temp_469__;
__temp_469__ = (5 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_470__;
__temp_470__ = (__temp_468__ + __temp_469__);
float __temp_471__;
__temp_471__ = (12 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_472__;
__temp_472__ = (__temp_470__ + __temp_471__);
float __temp_473__;
__temp_473__ = (15 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_474__;
__temp_474__ = (__temp_472__ + __temp_473__);
float __temp_475__;
__temp_475__ = (12 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_476__;
__temp_476__ = (__temp_474__ + __temp_475__);
float __temp_477__;
__temp_477__ = (5 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_478__;
__temp_478__ = (__temp_476__ + __temp_477__);
float __temp_479__;
__temp_479__ = (4 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_480__;
__temp_480__ = (__temp_478__ + __temp_479__);
float __temp_481__;
__temp_481__ = (9 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_482__;
__temp_482__ = (__temp_480__ + __temp_481__);
float __temp_483__;
__temp_483__ = (12 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_484__;
__temp_484__ = (__temp_482__ + __temp_483__);
float __temp_485__;
__temp_485__ = (9 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_486__;
__temp_486__ = (__temp_484__ + __temp_485__);
float __temp_487__;
__temp_487__ = (4 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_488__;
__temp_488__ = (__temp_486__ + __temp_487__);
float __temp_489__;
__temp_489__ = (2 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_490__;
__temp_490__ = (__temp_488__ + __temp_489__);
float __temp_491__;
__temp_491__ = (4 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_492__;
__temp_492__ = (__temp_490__ + __temp_491__);
float __temp_493__;
__temp_493__ = (5 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_494__;
__temp_494__ = (__temp_492__ + __temp_493__);
float __temp_495__;
__temp_495__ = (4 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_496__;
__temp_496__ = (__temp_494__ + __temp_495__);
float __temp_497__;
__temp_497__ = (2 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_498__;
__temp_498__ = (__temp_496__ + __temp_497__);
float __temp_499__;
__temp_499__ = (__temp_498__ / 159);
__var_1__[__iter_33__+(M-0)*(__iter_32__)] = __temp_499__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__4__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
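/* Host entry point. h_input and __var_0__ may be host or device pointers
   (hipPointerGetAttributes decides the memcpy kind). The routine allocates the device
   working buffers, selects a block size, launches the five kernels, and copies the
   N x M result back into __var_0__. */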
extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
hipMalloc(&__copy_arr_0__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
hipMalloc(&__copy_arr_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
hipMalloc(&__copy_arr_2__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
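/* Block-size selection for kernel 0: the 1-D block size suggested by
   hipOccupancyMaxPotentialBlockSize is split into an x/y pair (x is roughly the square
   root rounded down to a multiple of 32, y takes the remainder, both clamped to a
   minimum of 13), then the dimensions are halved until the shared-memory tiles fit
   within the per-block limit queried above. */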
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-1) - 0 ) + 1;
int __max_occupancy_blocksize___kernel___forma_kernel__0__;
int _max_occupancy_gridsize___kernel___forma_kernel__0__;
hipOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0);
int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2));
__max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32;
int __block_0___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___0,FORMA_MAX((__size_0___kernel___forma_kernel__0__)/32,1)*32),FORMA_MAX_BLOCKDIM_0),13);
__max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_0___kernel___forma_kernel__0__;
int __max_occupancy_blocksize___kernel___forma_kernel__0___1 = __max_occupancy_blocksize___kernel___forma_kernel__0__;
int __block_1___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___1,__size_1___kernel___forma_kernel__0__),FORMA_MAX_BLOCKDIM_1),13);
__max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_1___kernel___forma_kernel__0__;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
while( __SMemSize___kernel___forma_kernel__0__ > __FORMA_MAX_SHARED_MEM__){
if( __blockConfig___kernel___forma_kernel__0__.y/2 > 13)
__blockConfig___kernel___forma_kernel__0__.y /= 2;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
if( __SMemSize___kernel___forma_kernel__0__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel___forma_kernel__0__.x/2 > FORMA_MIN(32,13))
__blockConfig___kernel___forma_kernel__0__.x /= 2;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
}
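/* The logical block height is then forced to 16, overriding whatever value the
   occupancy / shared-memory loop above selected. */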
__blockConfig___kernel___forma_kernel__0__.y = 16;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
int __SMemSize___kernel___forma_kernel__1__ = 0;
__SMemSize___kernel___forma_kernel__1__ = __blockSizeToSMemSize___kernel___forma_kernel__1__(__blockConfig___kernel___forma_kernel__0__);
dim3 __blockConfig___kernel___forma_kernel__1__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4);
hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__1__), __SMemSize___kernel___forma_kernel__1__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __copy_arr_0__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
int __SMemSize___kernel___forma_kernel__2__ = 0;
__SMemSize___kernel___forma_kernel__2__ = __blockSizeToSMemSize___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__);
dim3 __blockConfig___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4);
hipLaunchKernelGGL(( __kernel___forma_kernel__2__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__2__), __SMemSize___kernel___forma_kernel__2__, 0, __copy_arr_0__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __copy_arr_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
int __SMemSize___kernel___forma_kernel__3__ = 0;
__SMemSize___kernel___forma_kernel__3__ = __blockSizeToSMemSize___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__);
dim3 __blockConfig___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4);
hipLaunchKernelGGL(( __kernel___forma_kernel__3__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__3__), __SMemSize___kernel___forma_kernel__3__, 0, __copy_arr_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __copy_arr_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
int __SMemSize___kernel___forma_kernel__4__ = 0;
__SMemSize___kernel___forma_kernel__4__ = __blockSizeToSMemSize___kernel___forma_kernel__4__(__blockConfig___kernel___forma_kernel__0__);
dim3 __blockConfig___kernel___forma_kernel__4__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4);
hipLaunchKernelGGL(( __kernel___forma_kernel__4__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__4__), __SMemSize___kernel___forma_kernel__4__, 0, __copy_arr_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__4__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__copy_arr_0__);
hipFree(__copy_arr_1__);
hipFree(__copy_arr_2__);
}
/*Host Free End*/
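/* Example driver (hypothetical sketch, not part of the generated file): gaussian()
   accepts plain host pointers and performs its own device allocation and transfers,
   so a minimal caller only needs two N x M float buffers. The grid size below is an
   arbitrary assumption.

   #include <cstdlib>
   extern "C" void gaussian(float *h_input, int N, int M, float *h_output);

   int main() {
     const int N = 1024, M = 1024;                    // assumed problem size
     float *h_in  = (float *)malloc(sizeof(float) * N * M);
     float *h_out = (float *)malloc(sizeof(float) * N * M);
     for (int i = 0; i < N * M; ++i) h_in[i] = (float)(i % 255);  // arbitrary test data
     gaussian(h_in, N, M, h_out);                     // h_out now holds the smoothed grid
     free(h_in);
     free(h_out);
     return 0;
   }
*/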
| 7a61152d310f2a626aa646daef29471af517b301.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float* __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
float * __tilevar_2__ = __tilevar_0__;
float * __tilevar_3__ = __tilevar_1__;
float * __tilevar_4__ = __tilevar_0__;
float * __tilevar_5__ = __tilevar_1__;
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_2__;
__iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
if( __iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) ){
int __iter_3__;
__iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if( __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1)) ){
__tilevar_2__[__iter_3__+(0-__iter_0__)+(FORMA_BLOCKDIM_X-0)*(__iter_2__+(0-__iter_1__))] = input[__iter_3__+(M-0)*(__iter_2__)];
}
}
__syncthreads();
int __iter_4__;
__iter_4__ = FORMA_MAX((__iter_1__+2),2) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3)) ){
int __iter_5__;
__iter_5__ = FORMA_MAX((__iter_0__+2),2) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3)) ){
int __temp_0__;
__temp_0__ = __iter_4__+(-2);
int __temp_1__;
__temp_1__ = __iter_5__+(-2);
float __temp_2__;
__temp_2__ = (2 * __tilevar_2__[__temp_1__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_0__+(0-(__iter_1__+0)))]);
int __temp_3__;
__temp_3__ = __iter_4__+(-2);
int __temp_4__;
__temp_4__ = __iter_5__+(-1);
float __temp_5__;
__temp_5__ = (4 * __tilevar_2__[__temp_4__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_3__+(0-(__iter_1__+0)))]);
float __temp_6__;
__temp_6__ = (__temp_2__ + __temp_5__);
int __temp_7__;
__temp_7__ = __iter_4__+(-2);
int __temp_8__;
__temp_8__ = __iter_5__;
float __temp_9__;
__temp_9__ = (5 * __tilevar_2__[__temp_8__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_7__+(0-(__iter_1__+0)))]);
float __temp_10__;
__temp_10__ = (__temp_6__ + __temp_9__);
int __temp_11__;
__temp_11__ = __iter_4__+(-2);
int __temp_12__;
__temp_12__ = __iter_5__+(1);
float __temp_13__;
__temp_13__ = (4 * __tilevar_2__[__temp_12__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_11__+(0-(__iter_1__+0)))]);
float __temp_14__;
__temp_14__ = (__temp_10__ + __temp_13__);
int __temp_15__;
__temp_15__ = __iter_4__+(-2);
int __temp_16__;
__temp_16__ = __iter_5__+(2);
float __temp_17__;
__temp_17__ = (2 * __tilevar_2__[__temp_16__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_15__+(0-(__iter_1__+0)))]);
float __temp_18__;
__temp_18__ = (__temp_14__ + __temp_17__);
int __temp_19__;
__temp_19__ = __iter_4__+(-1);
int __temp_20__;
__temp_20__ = __iter_5__+(-2);
float __temp_21__;
__temp_21__ = (4 * __tilevar_2__[__temp_20__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_19__+(0-(__iter_1__+0)))]);
float __temp_22__;
__temp_22__ = (__temp_18__ + __temp_21__);
int __temp_23__;
__temp_23__ = __iter_4__+(-1);
int __temp_24__;
__temp_24__ = __iter_5__+(-1);
float __temp_25__;
__temp_25__ = (9 * __tilevar_2__[__temp_24__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_23__+(0-(__iter_1__+0)))]);
float __temp_26__;
__temp_26__ = (__temp_22__ + __temp_25__);
int __temp_27__;
__temp_27__ = __iter_4__+(-1);
int __temp_28__;
__temp_28__ = __iter_5__;
float __temp_29__;
__temp_29__ = (12 * __tilevar_2__[__temp_28__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_27__+(0-(__iter_1__+0)))]);
float __temp_30__;
__temp_30__ = (__temp_26__ + __temp_29__);
int __temp_31__;
__temp_31__ = __iter_4__+(-1);
int __temp_32__;
__temp_32__ = __iter_5__+(1);
float __temp_33__;
__temp_33__ = (9 * __tilevar_2__[__temp_32__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_31__+(0-(__iter_1__+0)))]);
float __temp_34__;
__temp_34__ = (__temp_30__ + __temp_33__);
int __temp_35__;
__temp_35__ = __iter_4__+(-1);
int __temp_36__;
__temp_36__ = __iter_5__+(2);
float __temp_37__;
__temp_37__ = (4 * __tilevar_2__[__temp_36__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_35__+(0-(__iter_1__+0)))]);
float __temp_38__;
__temp_38__ = (__temp_34__ + __temp_37__);
int __temp_39__;
__temp_39__ = __iter_4__;
int __temp_40__;
__temp_40__ = __iter_5__+(-2);
float __temp_41__;
__temp_41__ = (5 * __tilevar_2__[__temp_40__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_39__+(0-(__iter_1__+0)))]);
float __temp_42__;
__temp_42__ = (__temp_38__ + __temp_41__);
int __temp_43__;
__temp_43__ = __iter_4__;
int __temp_44__;
__temp_44__ = __iter_5__+(-1);
float __temp_45__;
__temp_45__ = (12 * __tilevar_2__[__temp_44__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_43__+(0-(__iter_1__+0)))]);
float __temp_46__;
__temp_46__ = (__temp_42__ + __temp_45__);
int __temp_47__;
__temp_47__ = __iter_4__;
int __temp_48__;
__temp_48__ = __iter_5__;
float __temp_49__;
__temp_49__ = (15 * __tilevar_2__[__temp_48__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_47__+(0-(__iter_1__+0)))]);
float __temp_50__;
__temp_50__ = (__temp_46__ + __temp_49__);
int __temp_51__;
__temp_51__ = __iter_4__;
int __temp_52__;
__temp_52__ = __iter_5__+(1);
float __temp_53__;
__temp_53__ = (12 * __tilevar_2__[__temp_52__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_51__+(0-(__iter_1__+0)))]);
float __temp_54__;
__temp_54__ = (__temp_50__ + __temp_53__);
int __temp_55__;
__temp_55__ = __iter_4__;
int __temp_56__;
__temp_56__ = __iter_5__+(2);
float __temp_57__;
__temp_57__ = (5 * __tilevar_2__[__temp_56__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_55__+(0-(__iter_1__+0)))]);
float __temp_58__;
__temp_58__ = (__temp_54__ + __temp_57__);
int __temp_59__;
__temp_59__ = __iter_4__+(1);
int __temp_60__;
__temp_60__ = __iter_5__+(-2);
float __temp_61__;
__temp_61__ = (4 * __tilevar_2__[__temp_60__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_59__+(0-(__iter_1__+0)))]);
float __temp_62__;
__temp_62__ = (__temp_58__ + __temp_61__);
int __temp_63__;
__temp_63__ = __iter_4__+(1);
int __temp_64__;
__temp_64__ = __iter_5__+(-1);
float __temp_65__;
__temp_65__ = (9 * __tilevar_2__[__temp_64__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_63__+(0-(__iter_1__+0)))]);
float __temp_66__;
__temp_66__ = (__temp_62__ + __temp_65__);
int __temp_67__;
__temp_67__ = __iter_4__+(1);
int __temp_68__;
__temp_68__ = __iter_5__;
float __temp_69__;
__temp_69__ = (12 * __tilevar_2__[__temp_68__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_67__+(0-(__iter_1__+0)))]);
float __temp_70__;
__temp_70__ = (__temp_66__ + __temp_69__);
int __temp_71__;
__temp_71__ = __iter_4__+(1);
int __temp_72__;
__temp_72__ = __iter_5__+(1);
float __temp_73__;
__temp_73__ = (9 * __tilevar_2__[__temp_72__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_71__+(0-(__iter_1__+0)))]);
float __temp_74__;
__temp_74__ = (__temp_70__ + __temp_73__);
int __temp_75__;
__temp_75__ = __iter_4__+(1);
int __temp_76__;
__temp_76__ = __iter_5__+(2);
float __temp_77__;
__temp_77__ = (4 * __tilevar_2__[__temp_76__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_75__+(0-(__iter_1__+0)))]);
float __temp_78__;
__temp_78__ = (__temp_74__ + __temp_77__);
int __temp_79__;
__temp_79__ = __iter_4__+(2);
int __temp_80__;
__temp_80__ = __iter_5__+(-2);
float __temp_81__;
__temp_81__ = (2 * __tilevar_2__[__temp_80__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_79__+(0-(__iter_1__+0)))]);
float __temp_82__;
__temp_82__ = (__temp_78__ + __temp_81__);
int __temp_83__;
__temp_83__ = __iter_4__+(2);
int __temp_84__;
__temp_84__ = __iter_5__+(-1);
float __temp_85__;
__temp_85__ = (4 * __tilevar_2__[__temp_84__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_83__+(0-(__iter_1__+0)))]);
float __temp_86__;
__temp_86__ = (__temp_82__ + __temp_85__);
int __temp_87__;
__temp_87__ = __iter_4__+(2);
int __temp_88__;
__temp_88__ = __iter_5__;
float __temp_89__;
__temp_89__ = (5 * __tilevar_2__[__temp_88__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_87__+(0-(__iter_1__+0)))]);
float __temp_90__;
__temp_90__ = (__temp_86__ + __temp_89__);
int __temp_91__;
__temp_91__ = __iter_4__+(2);
int __temp_92__;
__temp_92__ = __iter_5__+(1);
float __temp_93__;
__temp_93__ = (4 * __tilevar_2__[__temp_92__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_91__+(0-(__iter_1__+0)))]);
float __temp_94__;
__temp_94__ = (__temp_90__ + __temp_93__);
int __temp_95__;
__temp_95__ = __iter_4__+(2);
int __temp_96__;
__temp_96__ = __iter_5__+(2);
float __temp_97__;
__temp_97__ = (2 * __tilevar_2__[__temp_96__+(0-(__iter_0__+0))+(FORMA_BLOCKDIM_X-0)*(__temp_95__+(0-(__iter_1__+0)))]);
float __temp_98__;
__temp_98__ = (__temp_94__ + __temp_97__);
float __temp_99__;
__temp_99__ = (__temp_98__ / 159);
__tilevar_3__[__iter_5__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_4__+(0-(__iter_1__+2)))] = __temp_99__;
}
}
int __iter_6__;
__iter_6__ = FORMA_MAX((__iter_1__+2),2) + (int)(threadIdx.y) ;
if( __iter_6__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3)) ){
int __iter_7__;
__iter_7__ = FORMA_MAX((__iter_0__+2),2) + (int)(threadIdx.x) ;
if( __iter_7__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3)) ){
if (__iter_6__ < (FORMA_MAX((__iter_1__+2),2)+4) || __iter_6__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3))-4) || __iter_7__ < (FORMA_MAX((__iter_0__+2),2)+4) || __iter_7__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))-4)) {
__copy_arr_0__[__iter_7__+(M-0)*(__iter_6__)] = __tilevar_3__[__iter_7__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__iter_6__+(0-(__iter_1__+2)))];
}
}
}
__syncthreads();
int __iter_12__;
__iter_12__ = FORMA_MAX((__iter_1__+4),2) + (int)(threadIdx.y) ;
if( __iter_12__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3)) ){
int __iter_13__;
__iter_13__ = FORMA_MAX((__iter_0__+4),2) + (int)(threadIdx.x) ;
if( __iter_13__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3)) ){
int __temp_150__;
__temp_150__ = __iter_12__+(-2);
int __temp_151__;
__temp_151__ = __iter_13__+(-2);
float __temp_152__;
__temp_152__ = (2 * __tilevar_3__[__temp_151__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_150__+(0-(__iter_1__+2)))]);
int __temp_153__;
__temp_153__ = __iter_12__+(-2);
int __temp_154__;
__temp_154__ = __iter_13__+(-1);
float __temp_155__;
__temp_155__ = (4 * __tilevar_3__[__temp_154__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_153__+(0-(__iter_1__+2)))]);
float __temp_156__;
__temp_156__ = (__temp_152__ + __temp_155__);
int __temp_157__;
__temp_157__ = __iter_12__+(-2);
int __temp_158__;
__temp_158__ = __iter_13__;
float __temp_159__;
__temp_159__ = (5 * __tilevar_3__[__temp_158__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_157__+(0-(__iter_1__+2)))]);
float __temp_160__;
__temp_160__ = (__temp_156__ + __temp_159__);
int __temp_161__;
__temp_161__ = __iter_12__+(-2);
int __temp_162__;
__temp_162__ = __iter_13__+(1);
float __temp_163__;
__temp_163__ = (4 * __tilevar_3__[__temp_162__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_161__+(0-(__iter_1__+2)))]);
float __temp_164__;
__temp_164__ = (__temp_160__ + __temp_163__);
int __temp_165__;
__temp_165__ = __iter_12__+(-2);
int __temp_166__;
__temp_166__ = __iter_13__+(2);
float __temp_167__;
__temp_167__ = (2 * __tilevar_3__[__temp_166__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_165__+(0-(__iter_1__+2)))]);
float __temp_168__;
__temp_168__ = (__temp_164__ + __temp_167__);
int __temp_169__;
__temp_169__ = __iter_12__+(-1);
int __temp_170__;
__temp_170__ = __iter_13__+(-2);
float __temp_171__;
__temp_171__ = (4 * __tilevar_3__[__temp_170__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_169__+(0-(__iter_1__+2)))]);
float __temp_172__;
__temp_172__ = (__temp_168__ + __temp_171__);
int __temp_173__;
__temp_173__ = __iter_12__+(-1);
int __temp_174__;
__temp_174__ = __iter_13__+(-1);
float __temp_175__;
__temp_175__ = (9 * __tilevar_3__[__temp_174__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_173__+(0-(__iter_1__+2)))]);
float __temp_176__;
__temp_176__ = (__temp_172__ + __temp_175__);
int __temp_177__;
__temp_177__ = __iter_12__+(-1);
int __temp_178__;
__temp_178__ = __iter_13__;
float __temp_179__;
__temp_179__ = (12 * __tilevar_3__[__temp_178__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_177__+(0-(__iter_1__+2)))]);
float __temp_180__;
__temp_180__ = (__temp_176__ + __temp_179__);
int __temp_181__;
__temp_181__ = __iter_12__+(-1);
int __temp_182__;
__temp_182__ = __iter_13__+(1);
float __temp_183__;
__temp_183__ = (9 * __tilevar_3__[__temp_182__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_181__+(0-(__iter_1__+2)))]);
float __temp_184__;
__temp_184__ = (__temp_180__ + __temp_183__);
int __temp_185__;
__temp_185__ = __iter_12__+(-1);
int __temp_186__;
__temp_186__ = __iter_13__+(2);
float __temp_187__;
__temp_187__ = (4 * __tilevar_3__[__temp_186__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_185__+(0-(__iter_1__+2)))]);
float __temp_188__;
__temp_188__ = (__temp_184__ + __temp_187__);
int __temp_189__;
__temp_189__ = __iter_12__;
int __temp_190__;
__temp_190__ = __iter_13__+(-2);
float __temp_191__;
__temp_191__ = (5 * __tilevar_3__[__temp_190__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_189__+(0-(__iter_1__+2)))]);
float __temp_192__;
__temp_192__ = (__temp_188__ + __temp_191__);
int __temp_193__;
__temp_193__ = __iter_12__;
int __temp_194__;
__temp_194__ = __iter_13__+(-1);
float __temp_195__;
__temp_195__ = (12 * __tilevar_3__[__temp_194__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_193__+(0-(__iter_1__+2)))]);
float __temp_196__;
__temp_196__ = (__temp_192__ + __temp_195__);
int __temp_197__;
__temp_197__ = __iter_12__;
int __temp_198__;
__temp_198__ = __iter_13__;
float __temp_199__;
__temp_199__ = (15 * __tilevar_3__[__temp_198__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_197__+(0-(__iter_1__+2)))]);
float __temp_200__;
__temp_200__ = (__temp_196__ + __temp_199__);
int __temp_201__;
__temp_201__ = __iter_12__;
int __temp_202__;
__temp_202__ = __iter_13__+(1);
float __temp_203__;
__temp_203__ = (12 * __tilevar_3__[__temp_202__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_201__+(0-(__iter_1__+2)))]);
float __temp_204__;
__temp_204__ = (__temp_200__ + __temp_203__);
int __temp_205__;
__temp_205__ = __iter_12__;
int __temp_206__;
__temp_206__ = __iter_13__+(2);
float __temp_207__;
__temp_207__ = (5 * __tilevar_3__[__temp_206__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_205__+(0-(__iter_1__+2)))]);
float __temp_208__;
__temp_208__ = (__temp_204__ + __temp_207__);
int __temp_209__;
__temp_209__ = __iter_12__+(1);
int __temp_210__;
__temp_210__ = __iter_13__+(-2);
float __temp_211__;
__temp_211__ = (4 * __tilevar_3__[__temp_210__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_209__+(0-(__iter_1__+2)))]);
float __temp_212__;
__temp_212__ = (__temp_208__ + __temp_211__);
int __temp_213__;
__temp_213__ = __iter_12__+(1);
int __temp_214__;
__temp_214__ = __iter_13__+(-1);
float __temp_215__;
__temp_215__ = (9 * __tilevar_3__[__temp_214__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_213__+(0-(__iter_1__+2)))]);
float __temp_216__;
__temp_216__ = (__temp_212__ + __temp_215__);
int __temp_217__;
__temp_217__ = __iter_12__+(1);
int __temp_218__;
__temp_218__ = __iter_13__;
float __temp_219__;
__temp_219__ = (12 * __tilevar_3__[__temp_218__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_217__+(0-(__iter_1__+2)))]);
float __temp_220__;
__temp_220__ = (__temp_216__ + __temp_219__);
int __temp_221__;
__temp_221__ = __iter_12__+(1);
int __temp_222__;
__temp_222__ = __iter_13__+(1);
float __temp_223__;
__temp_223__ = (9 * __tilevar_3__[__temp_222__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_221__+(0-(__iter_1__+2)))]);
float __temp_224__;
__temp_224__ = (__temp_220__ + __temp_223__);
int __temp_225__;
__temp_225__ = __iter_12__+(1);
int __temp_226__;
__temp_226__ = __iter_13__+(2);
float __temp_227__;
__temp_227__ = (4 * __tilevar_3__[__temp_226__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_225__+(0-(__iter_1__+2)))]);
float __temp_228__;
__temp_228__ = (__temp_224__ + __temp_227__);
int __temp_229__;
__temp_229__ = __iter_12__+(2);
int __temp_230__;
__temp_230__ = __iter_13__+(-2);
float __temp_231__;
__temp_231__ = (2 * __tilevar_3__[__temp_230__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_229__+(0-(__iter_1__+2)))]);
float __temp_232__;
__temp_232__ = (__temp_228__ + __temp_231__);
int __temp_233__;
__temp_233__ = __iter_12__+(2);
int __temp_234__;
__temp_234__ = __iter_13__+(-1);
float __temp_235__;
__temp_235__ = (4 * __tilevar_3__[__temp_234__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_233__+(0-(__iter_1__+2)))]);
float __temp_236__;
__temp_236__ = (__temp_232__ + __temp_235__);
int __temp_237__;
__temp_237__ = __iter_12__+(2);
int __temp_238__;
__temp_238__ = __iter_13__;
float __temp_239__;
__temp_239__ = (5 * __tilevar_3__[__temp_238__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_237__+(0-(__iter_1__+2)))]);
float __temp_240__;
__temp_240__ = (__temp_236__ + __temp_239__);
int __temp_241__;
__temp_241__ = __iter_12__+(2);
int __temp_242__;
__temp_242__ = __iter_13__+(1);
float __temp_243__;
__temp_243__ = (4 * __tilevar_3__[__temp_242__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_241__+(0-(__iter_1__+2)))]);
float __temp_244__;
__temp_244__ = (__temp_240__ + __temp_243__);
int __temp_245__;
__temp_245__ = __iter_12__+(2);
int __temp_246__;
__temp_246__ = __iter_13__+(2);
float __temp_247__;
__temp_247__ = (2 * __tilevar_3__[__temp_246__+(0-(__iter_0__+2))+(FORMA_BLOCKDIM_X-0)*(__temp_245__+(0-(__iter_1__+2)))]);
float __temp_248__;
__temp_248__ = (__temp_244__ + __temp_247__);
float __temp_249__;
__temp_249__ = (__temp_248__ / 159);
__tilevar_4__[__iter_13__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_12__+(0-(__iter_1__+4)))] = __temp_249__;
}
}
int __iter_14__;
__iter_14__ = FORMA_MAX((__iter_1__+4),2) + (int)(threadIdx.y) ;
if( __iter_14__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3)) ){
int __iter_15__;
__iter_15__ = FORMA_MAX((__iter_0__+4),2) + (int)(threadIdx.x) ;
if( __iter_15__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3)) ){
if (__iter_14__ < (FORMA_MAX((__iter_1__+4),2)+4) || __iter_14__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3))-4) || __iter_15__ < (FORMA_MAX((__iter_0__+4),2)+4) || __iter_15__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))-4)) {
__copy_arr_1__[__iter_15__+(M-0)*(__iter_14__)] = __tilevar_4__[__iter_15__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_14__+(0-(__iter_1__+4)))];
}
}
}
__syncthreads();
int __iter_20__;
__iter_20__ = FORMA_MAX((__iter_1__+6),2) + (int)(threadIdx.y) ;
if( __iter_20__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3)) ){
int __iter_21__;
__iter_21__ = FORMA_MAX((__iter_0__+6),2) + (int)(threadIdx.x) ;
if( __iter_21__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3)) ){
float __temp_300__;
__temp_300__ = (2 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_301__;
__temp_301__ = (4 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_302__;
__temp_302__ = (__temp_300__ + __temp_301__);
float __temp_303__;
__temp_303__ = (5 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_304__;
__temp_304__ = (__temp_302__ + __temp_303__);
float __temp_305__;
__temp_305__ = (4 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_306__;
__temp_306__ = (__temp_304__ + __temp_305__);
float __temp_307__;
__temp_307__ = (2 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-2)+(0-(__iter_1__+4)))]);
float __temp_308__;
__temp_308__ = (__temp_306__ + __temp_307__);
float __temp_309__;
__temp_309__ = (4 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_310__;
__temp_310__ = (__temp_308__ + __temp_309__);
float __temp_311__;
__temp_311__ = (9 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_312__;
__temp_312__ = (__temp_310__ + __temp_311__);
float __temp_313__;
__temp_313__ = (12 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_314__;
__temp_314__ = (__temp_312__ + __temp_313__);
float __temp_315__;
__temp_315__ = (9 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_316__;
__temp_316__ = (__temp_314__ + __temp_315__);
float __temp_317__;
__temp_317__ = (4 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(-1)+(0-(__iter_1__+4)))]);
float __temp_318__;
__temp_318__ = (__temp_316__ + __temp_317__);
float __temp_319__;
__temp_319__ = (5 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_320__;
__temp_320__ = (__temp_318__ + __temp_319__);
float __temp_321__;
__temp_321__ = (12 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_322__;
__temp_322__ = (__temp_320__ + __temp_321__);
float __temp_323__;
__temp_323__ = (15 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_324__;
__temp_324__ = (__temp_322__ + __temp_323__);
float __temp_325__;
__temp_325__ = (12 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_326__;
__temp_326__ = (__temp_324__ + __temp_325__);
float __temp_327__;
__temp_327__ = (5 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+4)))]);
float __temp_328__;
__temp_328__ = (__temp_326__ + __temp_327__);
float __temp_329__;
__temp_329__ = (4 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_330__;
__temp_330__ = (__temp_328__ + __temp_329__);
float __temp_331__;
__temp_331__ = (9 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_332__;
__temp_332__ = (__temp_330__ + __temp_331__);
float __temp_333__;
__temp_333__ = (12 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_334__;
__temp_334__ = (__temp_332__ + __temp_333__);
float __temp_335__;
__temp_335__ = (9 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_336__;
__temp_336__ = (__temp_334__ + __temp_335__);
float __temp_337__;
__temp_337__ = (4 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(1)+(0-(__iter_1__+4)))]);
float __temp_338__;
__temp_338__ = (__temp_336__ + __temp_337__);
float __temp_339__;
__temp_339__ = (2 * __tilevar_4__[__iter_21__+(-2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_340__;
__temp_340__ = (__temp_338__ + __temp_339__);
float __temp_341__;
__temp_341__ = (4 * __tilevar_4__[__iter_21__+(-1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_342__;
__temp_342__ = (__temp_340__ + __temp_341__);
float __temp_343__;
__temp_343__ = (5 * __tilevar_4__[__iter_21__+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_344__;
__temp_344__ = (__temp_342__ + __temp_343__);
float __temp_345__;
__temp_345__ = (4 * __tilevar_4__[__iter_21__+(1)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_346__;
__temp_346__ = (__temp_344__ + __temp_345__);
float __temp_347__;
__temp_347__ = (2 * __tilevar_4__[__iter_21__+(2)+(0-(__iter_0__+4))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(2)+(0-(__iter_1__+4)))]);
float __temp_348__;
__temp_348__ = (__temp_346__ + __temp_347__);
float __temp_349__;
__temp_349__ = (__temp_348__ / 159);
__tilevar_5__[__iter_21__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_20__+(0-(__iter_1__+6)))] = __temp_349__;
}
}
int __iter_22__;
__iter_22__ = FORMA_MAX((__iter_1__+6),2) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3)) ){
int __iter_23__;
__iter_23__ = FORMA_MAX((__iter_0__+6),2) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3)) ){
if (__iter_22__ < (FORMA_MAX((__iter_1__+6),2)+4) || __iter_22__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3))-4) || __iter_23__ < (FORMA_MAX((__iter_0__+6),2)+4) || __iter_23__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))-4)) {
__copy_arr_2__[__iter_23__+(M-0)*(__iter_22__)] = __tilevar_5__[__iter_23__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_22__+(0-(__iter_1__+6)))];
}
}
}
__syncthreads();
int __iter_28__;
__iter_28__ = FORMA_MAX((__iter_1__+8),2) + (int)(threadIdx.y) ;
if( __iter_28__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(N-3)) ){
int __iter_29__;
__iter_29__ = FORMA_MAX((__iter_0__+8),2) + (int)(threadIdx.x) ;
if( __iter_29__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3)) ){
float __temp_400__;
__temp_400__ = (2 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_401__;
__temp_401__ = (4 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_402__;
__temp_402__ = (__temp_400__ + __temp_401__);
float __temp_403__;
__temp_403__ = (5 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_404__;
__temp_404__ = (__temp_402__ + __temp_403__);
float __temp_405__;
__temp_405__ = (4 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_406__;
__temp_406__ = (__temp_404__ + __temp_405__);
float __temp_407__;
__temp_407__ = (2 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-2)+(0-(__iter_1__+6)))]);
float __temp_408__;
__temp_408__ = (__temp_406__ + __temp_407__);
float __temp_409__;
__temp_409__ = (4 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_410__;
__temp_410__ = (__temp_408__ + __temp_409__);
float __temp_411__;
__temp_411__ = (9 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_412__;
__temp_412__ = (__temp_410__ + __temp_411__);
float __temp_413__;
__temp_413__ = (12 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_414__;
__temp_414__ = (__temp_412__ + __temp_413__);
float __temp_415__;
__temp_415__ = (9 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_416__;
__temp_416__ = (__temp_414__ + __temp_415__);
float __temp_417__;
__temp_417__ = (4 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(-1)+(0-(__iter_1__+6)))]);
float __temp_418__;
__temp_418__ = (__temp_416__ + __temp_417__);
float __temp_419__;
__temp_419__ = (5 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_420__;
__temp_420__ = (__temp_418__ + __temp_419__);
float __temp_421__;
__temp_421__ = (12 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_422__;
__temp_422__ = (__temp_420__ + __temp_421__);
float __temp_423__;
__temp_423__ = (15 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_424__;
__temp_424__ = (__temp_422__ + __temp_423__);
float __temp_425__;
__temp_425__ = (12 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_426__;
__temp_426__ = (__temp_424__ + __temp_425__);
float __temp_427__;
__temp_427__ = (5 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(0-(__iter_1__+6)))]);
float __temp_428__;
__temp_428__ = (__temp_426__ + __temp_427__);
float __temp_429__;
__temp_429__ = (4 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_430__;
__temp_430__ = (__temp_428__ + __temp_429__);
float __temp_431__;
__temp_431__ = (9 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_432__;
__temp_432__ = (__temp_430__ + __temp_431__);
float __temp_433__;
__temp_433__ = (12 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_434__;
__temp_434__ = (__temp_432__ + __temp_433__);
float __temp_435__;
__temp_435__ = (9 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_436__;
__temp_436__ = (__temp_434__ + __temp_435__);
float __temp_437__;
__temp_437__ = (4 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(1)+(0-(__iter_1__+6)))]);
float __temp_438__;
__temp_438__ = (__temp_436__ + __temp_437__);
float __temp_439__;
__temp_439__ = (2 * __tilevar_5__[__iter_29__+(-2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_440__;
__temp_440__ = (__temp_438__ + __temp_439__);
float __temp_441__;
__temp_441__ = (4 * __tilevar_5__[__iter_29__+(-1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_442__;
__temp_442__ = (__temp_440__ + __temp_441__);
float __temp_443__;
__temp_443__ = (5 * __tilevar_5__[__iter_29__+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_444__;
__temp_444__ = (__temp_442__ + __temp_443__);
float __temp_445__;
__temp_445__ = (4 * __tilevar_5__[__iter_29__+(1)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_446__;
__temp_446__ = (__temp_444__ + __temp_445__);
float __temp_447__;
__temp_447__ = (2 * __tilevar_5__[__iter_29__+(2)+(0-(__iter_0__+6))+(FORMA_BLOCKDIM_X-0)*(__iter_28__+(2)+(0-(__iter_1__+6)))]);
float __temp_448__;
__temp_448__ = (__temp_446__ + __temp_447__);
float __temp_449__;
__temp_449__ = (__temp_448__ / 159);
__var_1__[__iter_29__+(M-0)*(__iter_28__)] = __temp_449__;
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y-0)*(FORMA_BLOCKDIM_X-0)));
return SMemSize;
}
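/* The fused kernel above (__kernel___forma_kernel__0__) evaluates all four smoothing
   stages for the interior of each tile; values that depend on a neighbouring tile are
   left for the halo passes below. Kernel 1 re-runs stage 1 on the tile borders (input
   -> __copy_arr_0__), kernel 2 patches __copy_arr_1__ from __copy_arr_0__, kernel 3
   patches __copy_arr_2__, and kernel 4 fills in the remaining cells of __var_1__. */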
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __copy_arr_0__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_8__;
__iter_8__ = FORMA_MAX((__iter_1__+(-2)),0) + (int)(threadIdx.y) ;
for( ; __iter_8__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-(-1)),(N-1)) ; __iter_8__ += (int)(blockDim.y) ){
int __iter_9__;
__iter_9__ = FORMA_MAX((__iter_0__+(-2)),0) + (int)(threadIdx.x) ;
for( ; __iter_9__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-(-1)),(M-1)) ; __iter_9__ += (int)(blockDim.x) ){
if (__iter_8__ < (FORMA_MAX((__iter_1__+2),2)+2) || __iter_8__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3))-2) || __iter_9__ < (FORMA_MAX((__iter_0__+2),2)+2) || __iter_9__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))-2)) {
__tilevar_2__[__iter_9__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_8__+(0-(__iter_1__-2)))] = input[__iter_9__+(M-0)*(__iter_8__)];
}
}
}
__syncthreads();
int __iter_10__;
__iter_10__ = FORMA_MAX(__iter_1__,2) + (int)(threadIdx.y) ;
for( ; __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-3)) ; __iter_10__ += (int)(blockDim.y) ){
int __iter_11__;
__iter_11__ = FORMA_MAX(__iter_0__,2) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-3)) ){
if (__iter_10__ < FORMA_MAX((__iter_1__+2),2) || __iter_10__ > FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-3)) || __iter_11__ < FORMA_MAX((__iter_0__+2),2) || __iter_11__ > FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
float __temp_100__;
__temp_100__ = (2 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_101__;
__temp_101__ = (4 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_102__;
__temp_102__ = (__temp_100__ + __temp_101__);
float __temp_103__;
__temp_103__ = (5 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_104__;
__temp_104__ = (__temp_102__ + __temp_103__);
float __temp_105__;
__temp_105__ = (4 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_106__;
__temp_106__ = (__temp_104__ + __temp_105__);
float __temp_107__;
__temp_107__ = (2 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-2)+(0-(__iter_1__-2)))]);
float __temp_108__;
__temp_108__ = (__temp_106__ + __temp_107__);
float __temp_109__;
__temp_109__ = (4 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_110__;
__temp_110__ = (__temp_108__ + __temp_109__);
float __temp_111__;
__temp_111__ = (9 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_112__;
__temp_112__ = (__temp_110__ + __temp_111__);
float __temp_113__;
__temp_113__ = (12 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_114__;
__temp_114__ = (__temp_112__ + __temp_113__);
float __temp_115__;
__temp_115__ = (9 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_116__;
__temp_116__ = (__temp_114__ + __temp_115__);
float __temp_117__;
__temp_117__ = (4 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(-1)+(0-(__iter_1__-2)))]);
float __temp_118__;
__temp_118__ = (__temp_116__ + __temp_117__);
float __temp_119__;
__temp_119__ = (5 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_120__;
__temp_120__ = (__temp_118__ + __temp_119__);
float __temp_121__;
__temp_121__ = (12 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_122__;
__temp_122__ = (__temp_120__ + __temp_121__);
float __temp_123__;
__temp_123__ = (15 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_124__;
__temp_124__ = (__temp_122__ + __temp_123__);
float __temp_125__;
__temp_125__ = (12 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_126__;
__temp_126__ = (__temp_124__ + __temp_125__);
float __temp_127__;
__temp_127__ = (5 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(0-(__iter_1__-2)))]);
float __temp_128__;
__temp_128__ = (__temp_126__ + __temp_127__);
float __temp_129__;
__temp_129__ = (4 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_130__;
__temp_130__ = (__temp_128__ + __temp_129__);
float __temp_131__;
__temp_131__ = (9 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_132__;
__temp_132__ = (__temp_130__ + __temp_131__);
float __temp_133__;
__temp_133__ = (12 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_134__;
__temp_134__ = (__temp_132__ + __temp_133__);
float __temp_135__;
__temp_135__ = (9 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_136__;
__temp_136__ = (__temp_134__ + __temp_135__);
float __temp_137__;
__temp_137__ = (4 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(1)+(0-(__iter_1__-2)))]);
float __temp_138__;
__temp_138__ = (__temp_136__ + __temp_137__);
float __temp_139__;
__temp_139__ = (2 * __tilevar_2__[__iter_11__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_140__;
__temp_140__ = (__temp_138__ + __temp_139__);
float __temp_141__;
__temp_141__ = (4 * __tilevar_2__[__iter_11__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_142__;
__temp_142__ = (__temp_140__ + __temp_141__);
float __temp_143__;
__temp_143__ = (5 * __tilevar_2__[__iter_11__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_144__;
__temp_144__ = (__temp_142__ + __temp_143__);
float __temp_145__;
__temp_145__ = (4 * __tilevar_2__[__iter_11__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_146__;
__temp_146__ = (__temp_144__ + __temp_145__);
float __temp_147__;
__temp_147__ = (2 * __tilevar_2__[__iter_11__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_10__+(2)+(0-(__iter_1__-2)))]);
float __temp_148__;
__temp_148__ = (__temp_146__ + __temp_147__);
float __temp_149__;
__temp_149__ = (__temp_148__ / 159);
__copy_arr_0__[__iter_11__+(M-0)*(__iter_10__)] = __temp_149__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__1__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
return SMemSize;
}
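/* Halo pass 2: reads the stage-1 result __copy_arr_0__ and fills in the border cells
   of __copy_arr_1__ that the fused kernel skipped. */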
__global__ void __kernel___forma_kernel__2__(float * __restrict__ __copy_arr_0__, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __copy_arr_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_16__;
__iter_16__ = FORMA_MAX((__iter_1__+(-2)),0) + (int)(threadIdx.y) ;
for( ; __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-(-1)),(N-1)) ; __iter_16__ += (int)(blockDim.y) ){
int __iter_17__;
__iter_17__ = FORMA_MAX((__iter_0__+(-2)),0) + (int)(threadIdx.x) ;
for( ; __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-(-1)),(M-1)) ; __iter_17__ += (int)(blockDim.x) ){
if (__iter_16__ < (FORMA_MAX((__iter_1__+4),2)+2) || __iter_16__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3))-2) || __iter_17__ < (FORMA_MAX((__iter_0__+4),2)+2) || __iter_17__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))-2)) {
__tilevar_3__[__iter_17__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_16__+(0-(__iter_1__-2)))] = __copy_arr_0__[__iter_17__+(M-0)*(__iter_16__)];
}
}
}
__syncthreads();
int __iter_18__;
__iter_18__ = FORMA_MAX(__iter_1__,2) + (int)(threadIdx.y) ;
for( ; __iter_18__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-3)) ; __iter_18__ += (int)(blockDim.y) ){
int __iter_19__;
__iter_19__ = FORMA_MAX(__iter_0__,2) + (int)(threadIdx.x) ;
if( __iter_19__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-3)) ){
if (__iter_18__ < FORMA_MAX((__iter_1__+4),2) || __iter_18__ > FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-3)) || __iter_19__ < FORMA_MAX((__iter_0__+4),2) || __iter_19__ > FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
float __temp_250__;
__temp_250__ = (2 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_251__;
__temp_251__ = (4 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_252__;
__temp_252__ = (__temp_250__ + __temp_251__);
float __temp_253__;
__temp_253__ = (5 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_254__;
__temp_254__ = (__temp_252__ + __temp_253__);
float __temp_255__;
__temp_255__ = (4 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_256__;
__temp_256__ = (__temp_254__ + __temp_255__);
float __temp_257__;
__temp_257__ = (2 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-2)+(0-(__iter_1__-2)))]);
float __temp_258__;
__temp_258__ = (__temp_256__ + __temp_257__);
float __temp_259__;
__temp_259__ = (4 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_260__;
__temp_260__ = (__temp_258__ + __temp_259__);
float __temp_261__;
__temp_261__ = (9 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_262__;
__temp_262__ = (__temp_260__ + __temp_261__);
float __temp_263__;
__temp_263__ = (12 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_264__;
__temp_264__ = (__temp_262__ + __temp_263__);
float __temp_265__;
__temp_265__ = (9 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_266__;
__temp_266__ = (__temp_264__ + __temp_265__);
float __temp_267__;
__temp_267__ = (4 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(-1)+(0-(__iter_1__-2)))]);
float __temp_268__;
__temp_268__ = (__temp_266__ + __temp_267__);
float __temp_269__;
__temp_269__ = (5 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_270__;
__temp_270__ = (__temp_268__ + __temp_269__);
float __temp_271__;
__temp_271__ = (12 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_272__;
__temp_272__ = (__temp_270__ + __temp_271__);
float __temp_273__;
__temp_273__ = (15 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_274__;
__temp_274__ = (__temp_272__ + __temp_273__);
float __temp_275__;
__temp_275__ = (12 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_276__;
__temp_276__ = (__temp_274__ + __temp_275__);
float __temp_277__;
__temp_277__ = (5 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(0-(__iter_1__-2)))]);
float __temp_278__;
__temp_278__ = (__temp_276__ + __temp_277__);
float __temp_279__;
__temp_279__ = (4 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_280__;
__temp_280__ = (__temp_278__ + __temp_279__);
float __temp_281__;
__temp_281__ = (9 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_282__;
__temp_282__ = (__temp_280__ + __temp_281__);
float __temp_283__;
__temp_283__ = (12 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_284__;
__temp_284__ = (__temp_282__ + __temp_283__);
float __temp_285__;
__temp_285__ = (9 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_286__;
__temp_286__ = (__temp_284__ + __temp_285__);
float __temp_287__;
__temp_287__ = (4 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(1)+(0-(__iter_1__-2)))]);
float __temp_288__;
__temp_288__ = (__temp_286__ + __temp_287__);
float __temp_289__;
__temp_289__ = (2 * __tilevar_3__[__iter_19__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_290__;
__temp_290__ = (__temp_288__ + __temp_289__);
float __temp_291__;
__temp_291__ = (4 * __tilevar_3__[__iter_19__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_292__;
__temp_292__ = (__temp_290__ + __temp_291__);
float __temp_293__;
__temp_293__ = (5 * __tilevar_3__[__iter_19__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_294__;
__temp_294__ = (__temp_292__ + __temp_293__);
float __temp_295__;
__temp_295__ = (4 * __tilevar_3__[__iter_19__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_296__;
__temp_296__ = (__temp_294__ + __temp_295__);
float __temp_297__;
__temp_297__ = (2 * __tilevar_3__[__iter_19__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_18__+(2)+(0-(__iter_1__-2)))]);
float __temp_298__;
__temp_298__ = (__temp_296__ + __temp_297__);
float __temp_299__;
__temp_299__ = (__temp_298__ / 159);
__copy_arr_1__[__iter_19__+(M-0)*(__iter_18__)] = __temp_299__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__2__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
return SMemSize;
}
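/* Halo pass 3: reads __copy_arr_1__ and patches the border cells of __copy_arr_2__. */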
__global__ void __kernel___forma_kernel__3__(float * __restrict__ __copy_arr_1__, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __copy_arr_2__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_24__;
__iter_24__ = FORMA_MAX((__iter_1__+(-2)),0) + (int)(threadIdx.y) ;
for( ; __iter_24__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-(-1)),(N-1)) ; __iter_24__ += (int)(blockDim.y) ){
int __iter_25__;
__iter_25__ = FORMA_MAX((__iter_0__+(-2)),0) + (int)(threadIdx.x) ;
for( ; __iter_25__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-(-1)),(M-1)) ; __iter_25__ += (int)(blockDim.x) ){
if (__iter_24__ < (FORMA_MAX((__iter_1__+6),2)+2) || __iter_24__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3))-2) || __iter_25__ < (FORMA_MAX((__iter_0__+6),2)+2) || __iter_25__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))-2)) {
__tilevar_4__[__iter_25__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_24__+(0-(__iter_1__-2)))] = __copy_arr_1__[__iter_25__+(M-0)*(__iter_24__)];
}
}
}
__syncthreads();
int __iter_26__;
__iter_26__ = FORMA_MAX(__iter_1__,2) + (int)(threadIdx.y) ;
for( ; __iter_26__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-3)) ; __iter_26__ += (int)(blockDim.y) ){
int __iter_27__;
__iter_27__ = FORMA_MAX(__iter_0__,2) + (int)(threadIdx.x) ;
if( __iter_27__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-3)) ){
if (__iter_26__ < FORMA_MAX((__iter_1__+6),2) || __iter_26__ > FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-7),(N-3)) || __iter_27__ < FORMA_MAX((__iter_0__+6),2) || __iter_27__ > FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
float __temp_350__;
__temp_350__ = (2 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_351__;
__temp_351__ = (4 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_352__;
__temp_352__ = (__temp_350__ + __temp_351__);
float __temp_353__;
__temp_353__ = (5 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_354__;
__temp_354__ = (__temp_352__ + __temp_353__);
float __temp_355__;
__temp_355__ = (4 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_356__;
__temp_356__ = (__temp_354__ + __temp_355__);
float __temp_357__;
__temp_357__ = (2 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-2)+(0-(__iter_1__-2)))]);
float __temp_358__;
__temp_358__ = (__temp_356__ + __temp_357__);
float __temp_359__;
__temp_359__ = (4 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_360__;
__temp_360__ = (__temp_358__ + __temp_359__);
float __temp_361__;
__temp_361__ = (9 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_362__;
__temp_362__ = (__temp_360__ + __temp_361__);
float __temp_363__;
__temp_363__ = (12 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_364__;
__temp_364__ = (__temp_362__ + __temp_363__);
float __temp_365__;
__temp_365__ = (9 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_366__;
__temp_366__ = (__temp_364__ + __temp_365__);
float __temp_367__;
__temp_367__ = (4 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(-1)+(0-(__iter_1__-2)))]);
float __temp_368__;
__temp_368__ = (__temp_366__ + __temp_367__);
float __temp_369__;
__temp_369__ = (5 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_370__;
__temp_370__ = (__temp_368__ + __temp_369__);
float __temp_371__;
__temp_371__ = (12 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_372__;
__temp_372__ = (__temp_370__ + __temp_371__);
float __temp_373__;
__temp_373__ = (15 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_374__;
__temp_374__ = (__temp_372__ + __temp_373__);
float __temp_375__;
__temp_375__ = (12 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_376__;
__temp_376__ = (__temp_374__ + __temp_375__);
float __temp_377__;
__temp_377__ = (5 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(0-(__iter_1__-2)))]);
float __temp_378__;
__temp_378__ = (__temp_376__ + __temp_377__);
float __temp_379__;
__temp_379__ = (4 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_380__;
__temp_380__ = (__temp_378__ + __temp_379__);
float __temp_381__;
__temp_381__ = (9 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_382__;
__temp_382__ = (__temp_380__ + __temp_381__);
float __temp_383__;
__temp_383__ = (12 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_384__;
__temp_384__ = (__temp_382__ + __temp_383__);
float __temp_385__;
__temp_385__ = (9 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_386__;
__temp_386__ = (__temp_384__ + __temp_385__);
float __temp_387__;
__temp_387__ = (4 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(1)+(0-(__iter_1__-2)))]);
float __temp_388__;
__temp_388__ = (__temp_386__ + __temp_387__);
float __temp_389__;
__temp_389__ = (2 * __tilevar_4__[__iter_27__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_390__;
__temp_390__ = (__temp_388__ + __temp_389__);
float __temp_391__;
__temp_391__ = (4 * __tilevar_4__[__iter_27__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_392__;
__temp_392__ = (__temp_390__ + __temp_391__);
float __temp_393__;
__temp_393__ = (5 * __tilevar_4__[__iter_27__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_394__;
__temp_394__ = (__temp_392__ + __temp_393__);
float __temp_395__;
__temp_395__ = (4 * __tilevar_4__[__iter_27__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_396__;
__temp_396__ = (__temp_394__ + __temp_395__);
float __temp_397__;
__temp_397__ = (2 * __tilevar_4__[__iter_27__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_26__+(2)+(0-(__iter_1__-2)))]);
float __temp_398__;
__temp_398__ = (__temp_396__ + __temp_397__);
float __temp_399__;
__temp_399__ = (__temp_398__ / 159);
__copy_arr_2__[__iter_27__+(M-0)*(__iter_26__)] = __temp_399__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__3__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
return SMemSize;
}
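/* Halo pass 4: reads __copy_arr_2__ and writes the remaining border cells of the final
   output __var_1__. */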
__global__ void __kernel___forma_kernel__4__(float * __restrict__ __copy_arr_2__, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float* __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X);
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y);
int __iter_30__;
__iter_30__ = FORMA_MAX((__iter_1__+(-2)),0) + (int)(threadIdx.y) ;
for( ; __iter_30__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-(-1)),(N-1)) ; __iter_30__ += (int)(blockDim.y) ){
int __iter_31__;
__iter_31__ = FORMA_MAX((__iter_0__+(-2)),0) + (int)(threadIdx.x) ;
for( ; __iter_31__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-(-1)),(M-1)) ; __iter_31__ += (int)(blockDim.x) ){
if (__iter_30__ < (FORMA_MAX((__iter_1__+8),2)+2) || __iter_30__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(N-3))-2) || __iter_31__ < (FORMA_MAX((__iter_0__+8),2)+2) || __iter_31__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))-2)) {
__tilevar_5__[__iter_31__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_30__+(0-(__iter_1__-2)))] = __copy_arr_2__[__iter_31__+(M-0)*(__iter_30__)];
}
}
}
__syncthreads();
int __iter_32__;
__iter_32__ = FORMA_MAX(__iter_1__,2) + (int)(threadIdx.y) ;
for( ; __iter_32__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-3)) ; __iter_32__ += (int)(blockDim.y) ){
int __iter_33__;
__iter_33__ = FORMA_MAX(__iter_0__,2) + (int)(threadIdx.x) ;
if( __iter_33__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-3)) ){
if (__iter_32__ < FORMA_MAX((__iter_1__+8),2) || __iter_32__ > FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-9),(N-3)) || __iter_33__ < FORMA_MAX((__iter_0__+8),2) || __iter_33__ > FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
float __temp_450__;
__temp_450__ = (2 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_451__;
__temp_451__ = (4 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_452__;
__temp_452__ = (__temp_450__ + __temp_451__);
float __temp_453__;
__temp_453__ = (5 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_454__;
__temp_454__ = (__temp_452__ + __temp_453__);
float __temp_455__;
__temp_455__ = (4 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_456__;
__temp_456__ = (__temp_454__ + __temp_455__);
float __temp_457__;
__temp_457__ = (2 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-2)+(0-(__iter_1__-2)))]);
float __temp_458__;
__temp_458__ = (__temp_456__ + __temp_457__);
float __temp_459__;
__temp_459__ = (4 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_460__;
__temp_460__ = (__temp_458__ + __temp_459__);
float __temp_461__;
__temp_461__ = (9 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_462__;
__temp_462__ = (__temp_460__ + __temp_461__);
float __temp_463__;
__temp_463__ = (12 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_464__;
__temp_464__ = (__temp_462__ + __temp_463__);
float __temp_465__;
__temp_465__ = (9 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_466__;
__temp_466__ = (__temp_464__ + __temp_465__);
float __temp_467__;
__temp_467__ = (4 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(-1)+(0-(__iter_1__-2)))]);
float __temp_468__;
__temp_468__ = (__temp_466__ + __temp_467__);
float __temp_469__;
__temp_469__ = (5 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_470__;
__temp_470__ = (__temp_468__ + __temp_469__);
float __temp_471__;
__temp_471__ = (12 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_472__;
__temp_472__ = (__temp_470__ + __temp_471__);
float __temp_473__;
__temp_473__ = (15 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_474__;
__temp_474__ = (__temp_472__ + __temp_473__);
float __temp_475__;
__temp_475__ = (12 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_476__;
__temp_476__ = (__temp_474__ + __temp_475__);
float __temp_477__;
__temp_477__ = (5 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(0-(__iter_1__-2)))]);
float __temp_478__;
__temp_478__ = (__temp_476__ + __temp_477__);
float __temp_479__;
__temp_479__ = (4 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_480__;
__temp_480__ = (__temp_478__ + __temp_479__);
float __temp_481__;
__temp_481__ = (9 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_482__;
__temp_482__ = (__temp_480__ + __temp_481__);
float __temp_483__;
__temp_483__ = (12 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_484__;
__temp_484__ = (__temp_482__ + __temp_483__);
float __temp_485__;
__temp_485__ = (9 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_486__;
__temp_486__ = (__temp_484__ + __temp_485__);
float __temp_487__;
__temp_487__ = (4 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(1)+(0-(__iter_1__-2)))]);
float __temp_488__;
__temp_488__ = (__temp_486__ + __temp_487__);
float __temp_489__;
__temp_489__ = (2 * __tilevar_5__[__iter_33__+(-2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_490__;
__temp_490__ = (__temp_488__ + __temp_489__);
float __temp_491__;
__temp_491__ = (4 * __tilevar_5__[__iter_33__+(-1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_492__;
__temp_492__ = (__temp_490__ + __temp_491__);
float __temp_493__;
__temp_493__ = (5 * __tilevar_5__[__iter_33__+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_494__;
__temp_494__ = (__temp_492__ + __temp_493__);
float __temp_495__;
__temp_495__ = (4 * __tilevar_5__[__iter_33__+(1)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_496__;
__temp_496__ = (__temp_494__ + __temp_495__);
float __temp_497__;
__temp_497__ = (2 * __tilevar_5__[__iter_33__+(2)+(0-(__iter_0__-2))+(FORMA_BLOCKDIM_X+4)*(__iter_32__+(2)+(0-(__iter_1__-2)))]);
float __temp_498__;
__temp_498__ = (__temp_496__ + __temp_497__);
float __temp_499__;
__temp_499__ = (__temp_498__ / 159);
__var_1__[__iter_33__+(M-0)*(__iter_32__)] = __temp_499__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__4__(dim3 blockDim){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(((FORMA_BLOCKDIM_Y+4)*(FORMA_BLOCKDIM_X+4)));
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
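/* Host driver: allocates device buffers, copies the input in, launches the fused kernel
   followed by the four halo passes, and copies the result back into __var_0__. Every
   stage applies the same 5x5 Gaussian stencil (weights 2,4,5,...,15, normalised by 159)
   that appears in the kernels above. */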
extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
cudaMalloc(&__copy_arr_0__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
cudaMalloc(&__copy_arr_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
cudaMalloc(&__copy_arr_2__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-1) - 0 ) + 1;
int __max_occupancy_blocksize___kernel___forma_kernel__0__;
int _max_occupancy_gridsize___kernel___forma_kernel__0__;
cudaOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0);
int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2));
__max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32;
int __block_0___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___0,FORMA_MAX((__size_0___kernel___forma_kernel__0__)/32,1)*32),FORMA_MAX_BLOCKDIM_0),13);
__max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_0___kernel___forma_kernel__0__;
int __max_occupancy_blocksize___kernel___forma_kernel__0___1 = __max_occupancy_blocksize___kernel___forma_kernel__0__;
int __block_1___kernel___forma_kernel__0__ = FORMA_MAX(FORMA_MIN(FORMA_MIN(__max_occupancy_blocksize___kernel___forma_kernel__0___1,__size_1___kernel___forma_kernel__0__),FORMA_MAX_BLOCKDIM_1),13);
__max_occupancy_blocksize___kernel___forma_kernel__0__ /= __block_1___kernel___forma_kernel__0__;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
while( __SMemSize___kernel___forma_kernel__0__ > __FORMA_MAX_SHARED_MEM__){
if( __blockConfig___kernel___forma_kernel__0__.y/2 > 13)
__blockConfig___kernel___forma_kernel__0__.y /= 2;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
if( __SMemSize___kernel___forma_kernel__0__ <= __FORMA_MAX_SHARED_MEM__)
break;
if( __blockConfig___kernel___forma_kernel__0__.x/2 > FORMA_MIN(32,13))
__blockConfig___kernel___forma_kernel__0__.x /= 2;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
}
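/* Note: the y-dimension is pinned to 16 below, overriding the occupancy/shared-memory
   sizing computed above; if 16 rows do not fit in shared memory the launch may fail. */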
__blockConfig___kernel___forma_kernel__0__.y = 16;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
int __SMemSize___kernel___forma_kernel__1__ = 0;
__SMemSize___kernel___forma_kernel__1__ = __blockSizeToSMemSize___kernel___forma_kernel__1__(__blockConfig___kernel___forma_kernel__0__);
dim3 __blockConfig___kernel___forma_kernel__1__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4);
__kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__1__, __SMemSize___kernel___forma_kernel__1__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __copy_arr_0__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
int __SMemSize___kernel___forma_kernel__2__ = 0;
__SMemSize___kernel___forma_kernel__2__ = __blockSizeToSMemSize___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__);
dim3 __blockConfig___kernel___forma_kernel__2__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4);
__kernel___forma_kernel__2__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__2__, __SMemSize___kernel___forma_kernel__2__>>> (__copy_arr_0__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __copy_arr_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
int __SMemSize___kernel___forma_kernel__3__ = 0;
__SMemSize___kernel___forma_kernel__3__ = __blockSizeToSMemSize___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__);
dim3 __blockConfig___kernel___forma_kernel__3__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4);
__kernel___forma_kernel__3__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__3__, __SMemSize___kernel___forma_kernel__3__>>> (__copy_arr_1__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __copy_arr_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
int __SMemSize___kernel___forma_kernel__4__ = 0;
__SMemSize___kernel___forma_kernel__4__ = __blockSizeToSMemSize___kernel___forma_kernel__4__(__blockConfig___kernel___forma_kernel__0__);
dim3 __blockConfig___kernel___forma_kernel__4__(__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y/4);
__kernel___forma_kernel__4__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__4__, __SMemSize___kernel___forma_kernel__4__>>> (__copy_arr_2__, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__4__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__copy_arr_0__);
cudaFree(__copy_arr_1__);
cudaFree(__copy_arr_2__);
}
/*Host Free End*/
|
86a45419e54adc04b6e64f9f2c99cc41a0d14874.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
////////////////////////////////////////////////////////////////////////////////
//
// QUICKSORT.CU
//
// Implementation of a parallel quicksort in CUDA. It comes in
// several parts:
//
// 1. A small-set insertion sort. We do this on any set with <=32 elements
// 2. A partitioning kernel, which - given a pivot - separates an input
// array into elements <=pivot, and >pivot. Two quicksorts will then
// be launched to resolve each of these.
// 3. A quicksort co-ordinator, which figures out what kernels to launch
// and when.
//
////////////////////////////////////////////////////////////////////////////////
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <helper_cuda.h>
#include <helper_string.h>
#include "cdpQuicksort.h"
////////////////////////////////////////////////////////////////////////////////
// Inline PTX call to return index of highest non-zero bit in a word
////////////////////////////////////////////////////////////////////////////////
static __device__ __forceinline__ unsigned int __qsflo(unsigned int word)
{
unsigned int ret;
asm volatile("bfind.u32 %0, %1;" : "=r"(ret) : "r"(word));
return ret;
}
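// Example: __qsflo(12u) == 3, since bit 3 is the highest set bit. qsort_warp uses
// 1 << (__qsflo(len-1U)+1) to round a length up to the next power of two for the
// bitonic sort launches.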
////////////////////////////////////////////////////////////////////////////////
//
// ringbufAlloc
//
// Allocates from a ringbuffer. Allows for not failing when we run out
// of stack for tracking the offset counts for each sort subsection.
//
// We use the atomicMax trick to allow out-of-order retirement. If we
// hit the size limit on the ringbuffer, then we spin-wait for people
// to complete.
//
////////////////////////////////////////////////////////////////////////////////
template< typename T >
static __device__ T *ringbufAlloc(qsortRingbuf *ringbuf)
{
// Wait for there to be space in the ring buffer. We'll retry only a fixed
// number of times and then fail, to avoid an out-of-memory deadlock.
unsigned int loop = 10000;
    while (((ringbuf->head - ringbuf->tail) >= ringbuf->stacksize) && (loop > 0))
        loop--;
if (loop == 0)
return NULL;
// Note that the element includes a little index book-keeping, for freeing later.
unsigned int index = atomicAdd((unsigned int *) &ringbuf->head, 1);
T *ret = (T *)(ringbuf->stackbase) + (index & (ringbuf->stacksize-1));
ret->index = index;
return ret;
}
////////////////////////////////////////////////////////////////////////////////
//
// ringBufFree
//
// Releases an element from the ring buffer. If every element is released
// up to and including this one, we can advance the tail to indicate that
// space is now available.
//
////////////////////////////////////////////////////////////////////////////////
template< typename T >
static __device__ void ringbufFree(qsortRingbuf *ringbuf, T *data)
{
unsigned int index = data->index; // Non-wrapped index to free
unsigned int count = atomicAdd((unsigned int *)&(ringbuf->count), 1) + 1;
unsigned int max = atomicMax((unsigned int *)&(ringbuf->max), index + 1);
// Update the tail if need be. Note we update "max" to be the new value in ringbuf->max
if (max < (index+1)) max = index+1;
if (max == count)
atomicMax((unsigned int *)&(ringbuf->tail), count);
}
////////////////////////////////////////////////////////////////////////////////
//
// qsort_warp
//
// Simplest possible implementation, does a per-warp quicksort with no inter-warp
// communication. This has a high atomic issue rate, but the rest should actually
// be fairly quick because of low work per thread.
//
// A warp finds its section of the data, then writes all data <pivot to one
// buffer and all data >pivot to the other. Atomics are used to get a unique
// section of the buffer.
//
// Obvious optimisation: do multiple chunks per warp, to increase in-flight loads
// and cover the instruction overhead.
//
////////////////////////////////////////////////////////////////////////////////
__global__ void qsort_warp(unsigned *indata,
unsigned *outdata,
unsigned int offset,
unsigned int len,
qsortAtomicData *atomicData,
qsortRingbuf *atomicDataStack,
unsigned int source_is_indata,
unsigned int depth)
{
// Find my data offset, based on warp ID
unsigned int thread_id = threadIdx.x + (blockIdx.x << QSORT_BLOCKSIZE_SHIFT);
//unsigned int warp_id = threadIdx.x >> 5; // Used for debug only
unsigned int lane_id = threadIdx.x & (warpSize-1);
// Exit if I'm outside the range of sort to be done
if (thread_id >= len)
return;
//
// First part of the algorithm. Each warp counts the number of elements that are
// greater/less than the pivot.
//
// When a warp knows its count, it updates an atomic counter.
//
// Read in the data and the pivot. Arbitrary pivot selection for now.
unsigned pivot = indata[offset + len/2];
unsigned data = indata[offset + thread_id];
// Count how many are <= and how many are > pivot.
// If all are <= pivot then we adjust the comparison
// because otherwise the sort will move nothing and
// we'll iterate forever.
unsigned int greater = (data > pivot);
unsigned int gt_mask = __ballot(greater);
if (gt_mask == 0)
{
greater = (data >= pivot);
gt_mask = __ballot(greater); // Must re-ballot for adjusted comparator
}
unsigned int lt_mask = __ballot(!greater);
unsigned int gt_count = __popc(gt_mask);
unsigned int lt_count = __popc(lt_mask);
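    // Example: if lanes 0, 3 and 5 hold values greater than the pivot, gt_mask has
    // bits 0, 3 and 5 set and gt_count == 3.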
// Atomically adjust the lt_ and gt_offsets by this amount. Only one thread need do this. Share the result using shfl
unsigned int lt_offset, gt_offset;
if (lane_id == 0)
{
if (lt_count > 0)
lt_offset = atomicAdd((unsigned int *) &atomicData->lt_offset, lt_count);
if (gt_count > 0)
gt_offset = len - (atomicAdd((unsigned int *) &atomicData->gt_offset, gt_count) + gt_count);
}
lt_offset = __shfl((int)lt_offset, 0); // Everyone pulls the offsets from lane 0
gt_offset = __shfl((int)gt_offset, 0);
__syncthreads();
// Now compute my own personal offset within this. I need to know how many
// threads with a lane ID less than mine are going to write to the same buffer
// as me. We can use popc to implement a single-operation warp scan in this case.
unsigned lane_mask_lt;
asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lane_mask_lt));
unsigned int my_mask = greater ? gt_mask : lt_mask;
unsigned int my_offset = __popc(my_mask & lane_mask_lt);
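    // Example: lane 5 sees lane_mask_lt == 0b11111, so my_offset is the number of
    // lanes 0-4 that write to the same partition as this lane.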
// Move data.
my_offset += greater ? gt_offset : lt_offset;
outdata[offset + my_offset] = data;
// Count up if we're the last warp in. If so, then Kepler will launch the next
// set of sorts directly from here.
if (lane_id == 0)
{
// Count "elements written". If I wrote the last one, then trigger the next qsorts
unsigned int mycount = lt_count + gt_count;
if (atomicAdd((unsigned int *) &atomicData->sorted_count, mycount) + mycount == len)
{
// We're the last warp to do any sorting. Therefore it's up to us to launch the next stage.
unsigned int lt_len = atomicData->lt_offset;
unsigned int gt_len = atomicData->gt_offset;
hipStream_t lstream, rstream;
hipStreamCreateWithFlags(&lstream, hipStreamNonBlocking);
hipStreamCreateWithFlags(&rstream, hipStreamNonBlocking);
// Begin by freeing our atomicData storage. It's better for the ringbuffer algorithm
// if we free when we're done, rather than re-using (makes for less fragmentation).
ringbufFree<qsortAtomicData>(atomicDataStack, atomicData);
// Exceptional case: if "lt_len" is zero, then all values in the batch
// are equal. We are then done (may need to copy into correct buffer, though)
if (lt_len == 0)
{
if (source_is_indata)
hipMemcpyAsync(indata+offset, outdata+offset, gt_len*sizeof(unsigned), hipMemcpyDeviceToDevice, lstream);
return;
}
// Start with lower half first
if (lt_len > BITONICSORT_LEN)
{
// If we've exceeded maximum depth, fall through to backup big_bitonicsort
if (depth >= QSORT_MAXDEPTH)
{
// The final bitonic stage sorts in-place in "outdata". We therefore
// re-use "indata" as the out-of-range tracking buffer. For (2^n)+1
// elements we need (2^(n+1)) bytes of oor buffer. The backup qsort
// buffer is at least this large when sizeof(QTYPE) >= 2.
hipLaunchKernelGGL(( big_bitonicsort), dim3(1), dim3(BITONICSORT_LEN), 0, lstream , outdata, source_is_indata ? indata : outdata, indata, offset, lt_len);
}
else
{
// Launch another quicksort. We need to allocate more storage for the atomic data.
if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL)
printf("Stack-allocation error. Failing left child launch.\n");
else
{
atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0;
unsigned int numblocks = (unsigned int)(lt_len+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
hipLaunchKernelGGL(( qsort_warp), dim3(numblocks), dim3(QSORT_BLOCKSIZE), 0, lstream , outdata, indata, offset, lt_len, atomicData, atomicDataStack, !source_is_indata, depth+1);
}
}
}
else if (lt_len > 1)
{
// Final stage uses a bitonic sort instead. It's important to
// make sure the final stage ends up in the correct (original) buffer.
// We launch the smallest power-of-2 number of threads that we can.
unsigned int bitonic_len = 1 << (__qsflo(lt_len-1U)+1);
hipLaunchKernelGGL(( bitonicsort), dim3(1), dim3(bitonic_len), 0, lstream , outdata, source_is_indata ? indata : outdata, offset, lt_len);
}
// Finally, if we sorted just one single element, we must still make
// sure that it winds up in the correct place.
else if (source_is_indata && (lt_len == 1))
indata[offset] = outdata[offset];
if (hipPeekAtLastError() != hipSuccess)
printf("Left-side launch fail: %s\n", hipGetErrorString(hipGetLastError()));
// Now the upper half.
if (gt_len > BITONICSORT_LEN)
{
// If we've exceeded maximum depth, fall through to backup big_bitonicsort
if (depth >= QSORT_MAXDEPTH)
hipLaunchKernelGGL(( big_bitonicsort), dim3(1), dim3(BITONICSORT_LEN), 0, rstream , outdata, source_is_indata ? indata : outdata, indata, offset+lt_len, gt_len);
else
{
// Allocate new atomic storage for this launch
if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL)
printf("Stack allocation error! Failing right-side launch.\n");
else
{
atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0;
unsigned int numblocks = (unsigned int)(gt_len+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
hipLaunchKernelGGL(( qsort_warp), dim3(numblocks), dim3(QSORT_BLOCKSIZE), 0, rstream , outdata, indata, offset+lt_len, gt_len, atomicData, atomicDataStack, !source_is_indata, depth+1);
}
}
}
else if (gt_len > 1)
{
unsigned int bitonic_len = 1 << (__qsflo(gt_len-1U)+1);
hipLaunchKernelGGL(( bitonicsort), dim3(1), dim3(bitonic_len), 0, rstream , outdata, source_is_indata ? indata : outdata, offset+lt_len, gt_len);
}
else if (source_is_indata && (gt_len == 1))
indata[offset+lt_len] = outdata[offset+lt_len];
if (hipPeekAtLastError() != hipSuccess)
printf("Right-side launch fail: %s\n", hipGetErrorString(hipGetLastError()));
}
}
}
////////////////////////////////////////////////////////////////////////////////
//
// run_quicksort
//
// Host-side code to run the Kepler version of quicksort. It's pretty
// simple, because all launch control is handled on the device via CDP.
//
// All parallel quicksorts require an equal-sized scratch buffer. This
// must be passed in ahead of time.
//
// Returns the time elapsed for the sort.
//
////////////////////////////////////////////////////////////////////////////////
float run_quicksort_cdp(unsigned *gpudata, unsigned *scratchdata, unsigned int count, hipStream_t stream)
{
unsigned int stacksize = QSORT_STACK_ELEMS;
// This is the stack, for atomic tracking of each sort's status
qsortAtomicData *gpustack;
checkCudaErrors(hipMalloc((void **)&gpustack, stacksize * sizeof(qsortAtomicData)));
checkCudaErrors(hipMemset(gpustack, 0, sizeof(qsortAtomicData))); // Only the first entry needs to be zeroed
// Create the memory ringbuffer used for handling the stack.
// Initialise everything to where it needs to be.
qsortRingbuf buf;
qsortRingbuf *ringbuf;
checkCudaErrors(hipMalloc((void **)&ringbuf, sizeof(qsortRingbuf)));
buf.head = 1; // We start with one allocation
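    // (That pre-allocated entry is gpustack[0], zeroed above and passed
    //  directly to the root qsort_warp launch below, so device-side
    //  ringbufAlloc calls start handing out index 1.)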
buf.tail = 0;
buf.count = 0;
buf.max = 0;
buf.stacksize = stacksize;
buf.stackbase = gpustack;
checkCudaErrors(hipMemcpy(ringbuf, &buf, sizeof(buf), hipMemcpyHostToDevice));
// Timing events...
hipEvent_t ev1, ev2;
checkCudaErrors(hipEventCreate(&ev1));
checkCudaErrors(hipEventCreate(&ev2));
checkCudaErrors(hipEventRecord(ev1));
// Now we trivially launch the qsort kernel
if (count > BITONICSORT_LEN)
{
unsigned int numblocks = (unsigned int)(count+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
hipLaunchKernelGGL(( qsort_warp), dim3(numblocks), dim3(QSORT_BLOCKSIZE), 0, stream , gpudata, scratchdata, 0U, count, gpustack, ringbuf, true, 0);
}
else
{
hipLaunchKernelGGL(( bitonicsort), dim3(1), dim3(BITONICSORT_LEN) , 0, 0, gpudata, gpudata, 0, count);
}
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipEventRecord(ev2));
checkCudaErrors(hipDeviceSynchronize());
float elapse=0.0f;
if (hipPeekAtLastError() != hipSuccess)
printf("Launch failure: %s\n", hipGetErrorString(hipGetLastError()));
else
checkCudaErrors(hipEventElapsedTime(&elapse, ev1, ev2));
// Sanity check that the stack allocator is doing the right thing
checkCudaErrors(hipMemcpy(&buf, ringbuf, sizeof(*ringbuf), hipMemcpyDeviceToHost));
if (count > BITONICSORT_LEN && buf.head != buf.tail)
{
printf("Stack allocation error!\nRingbuf:\n");
printf("\t head = %u\n", buf.head);
printf("\t tail = %u\n", buf.tail);
printf("\tcount = %u\n", buf.count);
printf("\t max = %u\n", buf.max);
}
// Release our stack data once we're done
checkCudaErrors(hipFree(ringbuf));
checkCudaErrors(hipFree(gpustack));
return elapse;
}
static void usage()
{
printf("Syntax: qsort [-size=<num>] [-seed=<num>] [-debug] [-loop-step=<num>] [-verbose]\n");
printf("If loop_step is non-zero, will run from 1->array_len in steps of loop_step\n");
}
void cudaTest(hipError_t error) {
if (error != hipSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
}
void print(uint* host_data, uint n) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
std::cout << host_data[i] << " ";
}
std::cout << "\n";
}
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
// Host side entry
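// Reads the input from stdin (element count followed by the elements) and
// sorts it on the GPU: run_quicksort_cdp uses CDP quicksort for large inputs
// and falls back to a single bitonic-sort block for small ones.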
int main(int argc, char *argv[])
{
uint num_of_elements;
uint i;
scanf("%d", &num_of_elements);
uint mem_size_vec = sizeof(int) * num_of_elements;
uint *h_vec = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++) {
scanf("%d", &h_vec[i]);
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
uint *d_scratchdata, *d_vec;
cudaTest(hipMalloc((void **) &d_vec, mem_size_vec));
cudaTest(hipMalloc((void **) &d_scratchdata, mem_size_vec));
cudaTest(hipMemcpy(d_vec, h_vec, mem_size_vec, hipMemcpyHostToDevice));
hipEventRecord(start);
float elapse;
elapse = run_quicksort_cdp(d_vec, d_scratchdata, num_of_elements, NULL);
hipEventRecord(stop);
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("Async kernel error: %s\n", hipGetErrorString(errAsync));
hipMemcpy(h_vec, d_vec, mem_size_vec, hipMemcpyDeviceToHost);
if (ELAPSED_TIME == 1) {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
} else
print(h_vec, num_of_elements);
free(h_vec);
hipFree(d_vec);
hipFree(d_scratchdata);
hipDeviceReset();
return 0;
}
| 86a45419e54adc04b6e64f9f2c99cc41a0d14874.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
////////////////////////////////////////////////////////////////////////////////
//
// QUICKSORT.CU
//
// Implementation of a parallel quicksort in CUDA. It comes in
// several parts:
//
// 1. A small-set insertion sort. We do this on any set with <=32 elements
// 2. A partitioning kernel, which - given a pivot - separates an input
// array into elements <=pivot, and >pivot. Two quicksorts will then
// be launched to resolve each of these.
// 3. A quicksort co-ordinator, which figures out what kernels to launch
// and when.
//
////////////////////////////////////////////////////////////////////////////////
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <helper_cuda.h>
#include <helper_string.h>
#include "cdpQuicksort.h"
////////////////////////////////////////////////////////////////////////////////
// Inline PTX call to return index of highest non-zero bit in a word
////////////////////////////////////////////////////////////////////////////////
static __device__ __forceinline__ unsigned int __qsflo(unsigned int word)
{
unsigned int ret;
asm volatile("bfind.u32 %0, %1;" : "=r"(ret) : "r"(word));
return ret;
}
////////////////////////////////////////////////////////////////////////////////
//
// ringbufAlloc
//
// Allocates from a ringbuffer. Allows for not failing when we run out
// of stack for tracking the offset counts for each sort subsection.
//
// We use the atomicMax trick to allow out-of-order retirement. If we
// hit the size limit on the ringbuffer, then we spin-wait for people
// to complete.
//
////////////////////////////////////////////////////////////////////////////////
template< typename T >
static __device__ T *ringbufAlloc(qsortRingbuf *ringbuf)
{
// Wait for there to be space in the ring buffer. We'll retry only a fixed
// number of times and then fail, to avoid an out-of-memory deadlock.
unsigned int loop = 10000;
while (((ringbuf->head - ringbuf->tail) >= ringbuf->stacksize) && (loop-- > 0));
if (loop == 0)
return NULL;
// Note that the element includes a little index book-keeping, for freeing later.
unsigned int index = atomicAdd((unsigned int *) &ringbuf->head, 1);
T *ret = (T *)(ringbuf->stackbase) + (index & (ringbuf->stacksize-1));
ret->index = index;
return ret;
}
////////////////////////////////////////////////////////////////////////////////
//
// ringBufFree
//
// Releases an element from the ring buffer. If every element is released
// up to and including this one, we can advance the tail to indicate that
// space is now available.
//
////////////////////////////////////////////////////////////////////////////////
template< typename T >
static __device__ void ringbufFree(qsortRingbuf *ringbuf, T *data)
{
unsigned int index = data->index; // Non-wrapped index to free
unsigned int count = atomicAdd((unsigned int *)&(ringbuf->count), 1) + 1;
unsigned int max = atomicMax((unsigned int *)&(ringbuf->max), index + 1);
// Update the tail if need be. Note we update "max" to be the new value in ringbuf->max
if (max < (index+1)) max = index+1;
if (max == count)
atomicMax((unsigned int *)&(ringbuf->tail), count);
}
////////////////////////////////////////////////////////////////////////////////
//
// qsort_warp
//
// Simplest possible implementation, does a per-warp quicksort with no inter-warp
// communication. This has a high atomic issue rate, but the rest should actually
// be fairly quick because of low work per thread.
//
// A warp finds its section of the data, then writes all data <pivot to one
// buffer and all data >pivot to the other. Atomics are used to get a unique
// section of the buffer.
//
// Obvious optimisation: do multiple chunks per warp, to increase in-flight loads
// and cover the instruction overhead.
//
////////////////////////////////////////////////////////////////////////////////
__global__ void qsort_warp(unsigned *indata,
unsigned *outdata,
unsigned int offset,
unsigned int len,
qsortAtomicData *atomicData,
qsortRingbuf *atomicDataStack,
unsigned int source_is_indata,
unsigned int depth)
{
// Find my data offset, based on warp ID
unsigned int thread_id = threadIdx.x + (blockIdx.x << QSORT_BLOCKSIZE_SHIFT);
//unsigned int warp_id = threadIdx.x >> 5; // Used for debug only
unsigned int lane_id = threadIdx.x & (warpSize-1);
// Exit if I'm outside the range of sort to be done
if (thread_id >= len)
return;
//
// First part of the algorithm. Each warp counts the number of elements that are
// greater/less than the pivot.
//
// When a warp knows its count, it updates an atomic counter.
//
// Read in the data and the pivot. Arbitrary pivot selection for now.
unsigned pivot = indata[offset + len/2];
unsigned data = indata[offset + thread_id];
// Count how many are <= and how many are > pivot.
// If all are <= pivot then we adjust the comparison
// because otherwise the sort will move nothing and
// we'll iterate forever.
unsigned int greater = (data > pivot);
unsigned int gt_mask = __ballot(greater);
if (gt_mask == 0)
{
greater = (data >= pivot);
gt_mask = __ballot(greater); // Must re-ballot for adjusted comparator
}
unsigned int lt_mask = __ballot(!greater);
unsigned int gt_count = __popc(gt_mask);
unsigned int lt_count = __popc(lt_mask);
// Atomically adjust the lt_ and gt_offsets by this amount. Only one thread need do this. Share the result using shfl
unsigned int lt_offset, gt_offset;
if (lane_id == 0)
{
if (lt_count > 0)
lt_offset = atomicAdd((unsigned int *) &atomicData->lt_offset, lt_count);
if (gt_count > 0)
gt_offset = len - (atomicAdd((unsigned int *) &atomicData->gt_offset, gt_count) + gt_count);
}
lt_offset = __shfl((int)lt_offset, 0); // Everyone pulls the offsets from lane 0
gt_offset = __shfl((int)gt_offset, 0);
__syncthreads();
// Now compute my own personal offset within this. I need to know how many
// threads with a lane ID less than mine are going to write to the same buffer
// as me. We can use popc to implement a single-operation warp scan in this case.
unsigned lane_mask_lt;
asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lane_mask_lt));
unsigned int my_mask = greater ? gt_mask : lt_mask;
unsigned int my_offset = __popc(my_mask & lane_mask_lt);
// Move data.
my_offset += greater ? gt_offset : lt_offset;
outdata[offset + my_offset] = data;
// Count up if we're the last warp in. If so, then Kepler will launch the next
// set of sorts directly from here.
if (lane_id == 0)
{
// Count "elements written". If I wrote the last one, then trigger the next qsorts
unsigned int mycount = lt_count + gt_count;
if (atomicAdd((unsigned int *) &atomicData->sorted_count, mycount) + mycount == len)
{
// We're the last warp to do any sorting. Therefore it's up to us to launch the next stage.
unsigned int lt_len = atomicData->lt_offset;
unsigned int gt_len = atomicData->gt_offset;
cudaStream_t lstream, rstream;
cudaStreamCreateWithFlags(&lstream, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&rstream, cudaStreamNonBlocking);
// Begin by freeing our atomicData storage. It's better for the ringbuffer algorithm
// if we free when we're done, rather than re-using (makes for less fragmentation).
ringbufFree<qsortAtomicData>(atomicDataStack, atomicData);
// Exceptional case: if "lt_len" is zero, then all values in the batch
// are equal. We are then done (may need to copy into correct buffer, though)
if (lt_len == 0)
{
if (source_is_indata)
cudaMemcpyAsync(indata+offset, outdata+offset, gt_len*sizeof(unsigned), cudaMemcpyDeviceToDevice, lstream);
return;
}
// Start with lower half first
if (lt_len > BITONICSORT_LEN)
{
// If we've exceeded maximum depth, fall through to backup big_bitonicsort
if (depth >= QSORT_MAXDEPTH)
{
// The final bitonic stage sorts in-place in "outdata". We therefore
// re-use "indata" as the out-of-range tracking buffer. For (2^n)+1
// elements we need (2^(n+1)) bytes of oor buffer. The backup qsort
// buffer is at least this large when sizeof(QTYPE) >= 2.
big_bitonicsort<<< 1, BITONICSORT_LEN, 0, lstream >>>(outdata, source_is_indata ? indata : outdata, indata, offset, lt_len);
}
else
{
// Launch another quicksort. We need to allocate more storage for the atomic data.
if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL)
printf("Stack-allocation error. Failing left child launch.\n");
else
{
atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0;
unsigned int numblocks = (unsigned int)(lt_len+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
qsort_warp<<< numblocks, QSORT_BLOCKSIZE, 0, lstream >>>(outdata, indata, offset, lt_len, atomicData, atomicDataStack, !source_is_indata, depth+1);
}
}
}
else if (lt_len > 1)
{
// Final stage uses a bitonic sort instead. It's important to
// make sure the final stage ends up in the correct (original) buffer.
// We launch the smallest power-of-2 number of threads that we can.
unsigned int bitonic_len = 1 << (__qsflo(lt_len-1U)+1);
bitonicsort<<< 1, bitonic_len, 0, lstream >>>(outdata, source_is_indata ? indata : outdata, offset, lt_len);
}
// Finally, if we sorted just one single element, we must still make
// sure that it winds up in the correct place.
else if (source_is_indata && (lt_len == 1))
indata[offset] = outdata[offset];
if (cudaPeekAtLastError() != cudaSuccess)
printf("Left-side launch fail: %s\n", cudaGetErrorString(cudaGetLastError()));
// Now the upper half.
if (gt_len > BITONICSORT_LEN)
{
// If we've exceeded maximum depth, fall through to backup big_bitonicsort
if (depth >= QSORT_MAXDEPTH)
big_bitonicsort<<< 1, BITONICSORT_LEN, 0, rstream >>>(outdata, source_is_indata ? indata : outdata, indata, offset+lt_len, gt_len);
else
{
// Allocate new atomic storage for this launch
if ((atomicData = ringbufAlloc<qsortAtomicData>(atomicDataStack)) == NULL)
printf("Stack allocation error! Failing right-side launch.\n");
else
{
atomicData->lt_offset = atomicData->gt_offset = atomicData->sorted_count = 0;
unsigned int numblocks = (unsigned int)(gt_len+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
qsort_warp<<< numblocks, QSORT_BLOCKSIZE, 0, rstream >>>(outdata, indata, offset+lt_len, gt_len, atomicData, atomicDataStack, !source_is_indata, depth+1);
}
}
}
else if (gt_len > 1)
{
unsigned int bitonic_len = 1 << (__qsflo(gt_len-1U)+1);
bitonicsort<<< 1, bitonic_len, 0, rstream >>>(outdata, source_is_indata ? indata : outdata, offset+lt_len, gt_len);
}
else if (source_is_indata && (gt_len == 1))
indata[offset+lt_len] = outdata[offset+lt_len];
if (cudaPeekAtLastError() != cudaSuccess)
printf("Right-side launch fail: %s\n", cudaGetErrorString(cudaGetLastError()));
}
}
}
////////////////////////////////////////////////////////////////////////////////
//
// run_quicksort
//
// Host-side code to run the Kepler version of quicksort. It's pretty
// simple, because all launch control is handled on the device via CDP.
//
// All parallel quicksorts require an equal-sized scratch buffer. This
// must be passed in ahead of time.
//
// Returns the time elapsed for the sort.
//
////////////////////////////////////////////////////////////////////////////////
float run_quicksort_cdp(unsigned *gpudata, unsigned *scratchdata, unsigned int count, cudaStream_t stream)
{
unsigned int stacksize = QSORT_STACK_ELEMS;
// This is the stack, for atomic tracking of each sort's status
qsortAtomicData *gpustack;
checkCudaErrors(cudaMalloc((void **)&gpustack, stacksize * sizeof(qsortAtomicData)));
checkCudaErrors(cudaMemset(gpustack, 0, sizeof(qsortAtomicData))); // Only the first entry needs to be zeroed
// Create the memory ringbuffer used for handling the stack.
// Initialise everything to where it needs to be.
qsortRingbuf buf;
qsortRingbuf *ringbuf;
checkCudaErrors(cudaMalloc((void **)&ringbuf, sizeof(qsortRingbuf)));
buf.head = 1; // We start with one allocation
buf.tail = 0;
buf.count = 0;
buf.max = 0;
buf.stacksize = stacksize;
buf.stackbase = gpustack;
checkCudaErrors(cudaMemcpy(ringbuf, &buf, sizeof(buf), cudaMemcpyHostToDevice));
// Timing events...
cudaEvent_t ev1, ev2;
checkCudaErrors(cudaEventCreate(&ev1));
checkCudaErrors(cudaEventCreate(&ev2));
checkCudaErrors(cudaEventRecord(ev1));
// Now we trivially launch the qsort kernel
if (count > BITONICSORT_LEN)
{
unsigned int numblocks = (unsigned int)(count+(QSORT_BLOCKSIZE-1)) >> QSORT_BLOCKSIZE_SHIFT;
qsort_warp<<< numblocks, QSORT_BLOCKSIZE, 0, stream >>>(gpudata, scratchdata, 0U, count, gpustack, ringbuf, true, 0);
}
else
{
bitonicsort<<< 1, BITONICSORT_LEN >>>(gpudata, gpudata, 0, count);
}
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaEventRecord(ev2));
checkCudaErrors(cudaDeviceSynchronize());
float elapse=0.0f;
if (cudaPeekAtLastError() != cudaSuccess)
printf("Launch failure: %s\n", cudaGetErrorString(cudaGetLastError()));
else
checkCudaErrors(cudaEventElapsedTime(&elapse, ev1, ev2));
// Sanity check that the stack allocator is doing the right thing
checkCudaErrors(cudaMemcpy(&buf, ringbuf, sizeof(*ringbuf), cudaMemcpyDeviceToHost));
if (count > BITONICSORT_LEN && buf.head != buf.tail)
{
printf("Stack allocation error!\nRingbuf:\n");
printf("\t head = %u\n", buf.head);
printf("\t tail = %u\n", buf.tail);
printf("\tcount = %u\n", buf.count);
printf("\t max = %u\n", buf.max);
}
// Release our stack data once we're done
checkCudaErrors(cudaFree(ringbuf));
checkCudaErrors(cudaFree(gpustack));
return elapse;
}
static void usage()
{
printf("Syntax: qsort [-size=<num>] [-seed=<num>] [-debug] [-loop-step=<num>] [-verbose]\n");
printf("If loop_step is non-zero, will run from 1->array_len in steps of loop_step\n");
}
void cudaTest(cudaError_t error) {
if (error != cudaSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
}
void print(uint* host_data, uint n) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
std::cout << host_data[i] << " ";
}
std::cout << "\n";
}
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
// Host side entry
int main(int argc, char *argv[])
{
uint num_of_elements;
uint i;
scanf("%d", &num_of_elements);
uint mem_size_vec = sizeof(int) * num_of_elements;
uint *h_vec = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++) {
scanf("%d", &h_vec[i]);
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
uint *d_scratchdata, *d_vec;
cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_scratchdata, mem_size_vec));
cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice));
cudaEventRecord(start);
float elapse;
elapse = run_quicksort_cdp(d_vec, d_scratchdata, num_of_elements, NULL);
cudaEventRecord(stop);
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
cudaMemcpy(h_vec, d_vec, mem_size_vec, cudaMemcpyDeviceToHost);
if (ELAPSED_TIME == 1) {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
} else
print(h_vec, num_of_elements);
free(h_vec);
cudaFree(d_vec);
cudaFree(d_scratchdata);
cudaDeviceReset();
return 0;
}
|
57a1d7c777aab264b302a1221ffc77affbb69308.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// FILE: ising3d.c
#include <string>
#include <stdio.h>
#include <math.h>
#include <stdlib.h> // Provides rand(), RAND_MAX
#include <assert.h>
#include <time.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
using std::string;
#define BLOCKLx 4
#define BLOCKLy 4
#define BLOCKLz 4
#define MyBit 1ULL
#define N64bit 64
typedef unsigned long long int bit64;
typedef bit64 spin_t;
int L, D, N;
int init, istp, mstp, nbins;
double T;
double enrg, ma, m2, m4;
spin_t *spins;
int **nnbors;
float prob[13];
texture<float, 1, hipReadModeElementType> Boltz;
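// prob[] holds the 13 pre-computed acceptance factors exp(2*beta*e) for
// e = -6..6 (filled in probability()); it is bound to the read-only texture
// "Boltz" so the update kernel can fetch them with tex1Dfetch.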
__global__ void init_rand(int L, unsigned long long int seed, hiprandState_t *states) {
int global_x = blockIdx.x * blockDim.x + threadIdx.x;
int global_y = blockIdx.y * blockDim.y + threadIdx.y;
int global_z = blockIdx.z * blockDim.z + threadIdx.z;
int global_n = global_z * L * L + global_y * L + global_x;
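  // One RNG state per lattice site; using the site index as the subsequence
  // (and offset) gives each site its own independent random stream.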
hiprand_init(seed, global_n, global_n, &states[global_n]);
__syncthreads();
} // init_rand
__device__ float ran(hiprandState_t* global_state, int global_n) {
hiprandState_t local_state = global_state[global_n];
float r = hiprand_uniform(&local_state);
global_state[global_n] = local_state;
return r;
} // ran
__global__ void display_dims() {
if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) {
if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
printf("gDim.x : %i \n", gridDim.x);
printf("bDim.x : %i \n", blockDim.x);
}
}
__syncthreads();
} // display_dims
__global__ void mc_updates(int L, hiprandState_t* global_state, spin_t *d_spins, int offset) {
__shared__ spin_t local_spins[(BLOCKLx+2)*(BLOCKLy+2)*(BLOCKLz+2)];
int global_x, global_y, global_z, global_n;
int nn_global_x, nn_global_y, nn_global_z, nn_global_n;
int local_x, local_y, local_z, local_n;
spin_t sj, ss0, ss1, ss2, ss3, ss4, ss5, mask;
int xm, xp, ym, yp, zm, zp, L2 = L*L, b, dE;
global_x = blockIdx.x * blockDim.x + threadIdx.x;
global_y = blockIdx.y * blockDim.y + threadIdx.y;
global_z = blockIdx.z * blockDim.z + threadIdx.z;
global_n = global_z * L2 + global_y * L + global_x;
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[global_n];
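  // The six guarded blocks below fill the halo (ghost) layers of the
  // (BLOCKLx+2) x (BLOCKLy+2) x (BLOCKLz+2) shared-memory tile with spins from
  // the neighbouring thread blocks, wrapping periodically at the lattice edges.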
if (threadIdx.x == 0) {
nn_global_x = ((blockIdx.x-1+gridDim.x)%gridDim.x)*BLOCKLx + BLOCKLx-1;
nn_global_y = global_y;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = 0;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.x == BLOCKLx-1) {
nn_global_x = ((blockIdx.x+1)%gridDim.x)*BLOCKLx;
nn_global_y = global_y;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = BLOCKLx+1;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.y == 0) {
nn_global_x = global_x;
nn_global_y = ((blockIdx.y-1+gridDim.y)%gridDim.y)*BLOCKLy + BLOCKLy-1;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = 0;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.y == BLOCKLy-1) {
nn_global_x = global_x;
nn_global_y = ((blockIdx.y+1)%gridDim.y)*BLOCKLy;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = BLOCKLy+1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.z == 0) {
nn_global_x = global_x;
nn_global_y = global_y;
nn_global_z = ((blockIdx.z-1+gridDim.z)%gridDim.z)*BLOCKLz + BLOCKLz-1;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = 0;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.z == BLOCKLz-1) {
nn_global_x = global_x;
nn_global_y = global_y;
nn_global_z = ((blockIdx.z+1)%gridDim.z)*BLOCKLz;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = BLOCKLz+1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
__syncthreads();
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
sj = local_spins[local_n];
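  // Checkerboard update: "offset" (0 or 1) selects the sublattice updated in
  // this pass, so neighbouring sites are never flipped simultaneously.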
if ( (threadIdx.x + threadIdx.y + threadIdx.z + offset)%2 == 0 ) {
xm = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x-1;
xp = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x+1;
ym = local_z * (BLOCKLx+2)*(BLOCKLy+2) + (local_y-1) * (BLOCKLx+2) + local_x;
yp = local_z * (BLOCKLx+2)*(BLOCKLy+2) + (local_y+1) * (BLOCKLx+2) + local_x;
zm = (local_z-1) * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
zp = (local_z+1) * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
ss0 = sj ^ local_spins[xm];
ss1 = sj ^ local_spins[xp];
ss2 = sj ^ local_spins[ym];
ss3 = sj ^ local_spins[yp];
ss4 = sj ^ local_spins[zm];
ss5 = sj ^ local_spins[zp];
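  // Multi-spin coding: each 64-bit word packs 64 independent replicas, one per
  // bit. Bit b of ssK is set iff the site and its K-th neighbour disagree in
  // replica b, so dE below is (antialigned - aligned) neighbours, i.e.
  // -DeltaE/2 for flipping that spin, and the flip is accepted with
  // probability min(1, exp(2*beta*dE)) read from the Boltz texture.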
for (b = 0; b < N64bit; ++b) {
dE = 0;
// dE <--> 2 \sigma^B_i XOR \sigma^B_j - 1
mask = (MyBit << b);
dE += (ss0 & mask) ? 1 : -1;
dE += (ss1 & mask) ? 1 : -1;
dE += (ss2 & mask) ? 1 : -1;
dE += (ss3 & mask) ? 1 : -1;
dE += (ss4 & mask) ? 1 : -1;
dE += (ss5 & mask) ? 1 : -1;
if ( ran(global_state, global_n) < tex1Dfetch(Boltz,dE+6) )
sj ^= mask;
} // b
local_spins[local_n] = sj;
d_spins[global_n] = local_spins[local_n];
} // end of "if (Idx.x + Idx.y + Idx.z + offset)%2 == 0"
__syncthreads();
} // mc_updates
// ========================================================================== //
void initialize();
void read_file();
void set_parameters();
void allocate_arrays();
void lattice();
void configuration();
void random_conf();
void read_conf();
void write_conf();
void probability();
void deallocate_arrays();
void clean();
void measure();
void write_data();
// ========================================================================== //
int main(int argc, char* argv[]) {
hiprandState_t *devStates;
spin_t *dev_spins;
float *dev_prob;
srand(time(NULL));
initialize();
dim3 block(BLOCKLx, BLOCKLy, BLOCKLz);
dim3 grid(L/BLOCKLx, L/BLOCKLy, L/BLOCKLz);
//display_dims<<<grid, block>>>();
hipMalloc((void **)&dev_spins, N*sizeof(spin_t));
hipMalloc((void **)&devStates, N*sizeof(hiprandState_t));
hipLaunchKernelGGL(( init_rand), dim3(grid), dim3(block), 0, 0, L, rand(), devStates);
hipMalloc((void **) &dev_prob, (4*D+1) * sizeof(float));
hipMemcpy(dev_prob, prob, (4*D+1)*sizeof(float), hipMemcpyHostToDevice);
hipBindTexture(NULL, Boltz, dev_prob, (4*D+1)*sizeof(float));
hipMemcpy(dev_spins, spins, N*sizeof(spin_t), hipMemcpyHostToDevice);
for (int i = 0; i < istp; ++i) {
hipLaunchKernelGGL(( mc_updates), dim3(grid), dim3(block), 0, 0, L, devStates, dev_spins, 0);
hipLaunchKernelGGL(( mc_updates), dim3(grid), dim3(block), 0, 0, L, devStates, dev_spins, 1);
}
hipMemcpy(spins, dev_spins, N*sizeof(spin_t), hipMemcpyDeviceToHost);
write_conf();
for (int k = 0; k < nbins; ++k) {
clean();
for (int i = 0; i < mstp; ++i) {
hipLaunchKernelGGL(( mc_updates), dim3(grid), dim3(block), 0, 0, L, devStates, dev_spins, 0);
hipLaunchKernelGGL(( mc_updates), dim3(grid), dim3(block), 0, 0, L, devStates, dev_spins, 1);
hipMemcpy(spins, dev_spins, N*sizeof(spin_t), hipMemcpyDeviceToHost);
measure();
} // i-mstp
write_data();
write_conf();
} // k-bin
if (dev_prob != NULL) { hipFree(dev_prob); dev_prob = NULL; }
if (devStates != NULL) { hipFree(devStates); devStates = NULL; }
if (dev_spins != NULL) { hipFree(dev_spins); dev_spins = NULL; }
deallocate_arrays();
return 0;
} // main
void write_data()
{
FILE *ofptr;
double dmstp = (double) mstp;
enrg /= dmstp;
ma /= dmstp;
m2 /= dmstp;
m4 /= dmstp;
ofptr = fopen("data.dat","a");
fprintf(ofptr,"%12.8f %12.8f %12.8f %12.8f \n", enrg, ma, m2, m4);
fclose(ofptr);
} // write_data
void clean()
{
enrg = ma = m2 = m4 = 0.0e0;
} // clean
void measure() {
int E = 0, j, b;
int m[N64bit];
bit64 mask, ss1, ss3, ss5;
double dN = (double) N, dm , local_ma, local_m2, local_m4, d64 = (double) N64bit;
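  // m[b] accumulates the magnetization of replica (bit-plane) b; E accumulates
  // the bond energy over all replicas, visiting only the +x, +y and +z
  // neighbours (nnbors[][1], [3], [5]) so every bond is counted exactly once.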
for (b = 0; b < N64bit; ++b) m[b] = 0;
for (j = 0; j < N; ++j) {
ss1 = spins[j] ^ spins[ nnbors[j][1] ];
ss3 = spins[j] ^ spins[ nnbors[j][3] ];
ss5 = spins[j] ^ spins[ nnbors[j][5] ];
for (b = 0; b < N64bit; ++b) {
mask = (MyBit << b);
m[b] += ( (spins[j] & mask) ? 1 : -1 );
E += ( (ss1 & mask) ? -1 : 1 );
E += ( (ss3 & mask) ? -1 : 1 );
E += ( (ss5 & mask) ? -1 : 1 );
} // b
} // j
enrg += (-(double) E)/(dN * (double) N64bit);
local_ma = local_m2 = local_m4 = 0.0;
for (b = 0; b < N64bit; ++b) {
dm = (double) m[b]/dN;
local_ma += fabs(dm);
local_m2 += pow(dm,2.0);
local_m4 += pow(dm,4.0);
} //b
ma += local_ma/d64;
m2 += local_m2/d64;
m4 += local_m4/d64;
} // measure
void initialize()
{
// 1) read-in input parameters
read_file();
// 2) set simulation parameters
set_parameters();
// 3) allocate arrays
allocate_arrays();
// 4) generate the 3D lattice
lattice();
// 5) generate the initial configuration
configuration();
// 6) construct the probability table
probability();
} // initialize
void probability()
{
float beta = 1.0e0/T;
// e <--> 2 \sigma^B_i XOR \sigma^B_j - 1
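  // prob[e+6] = exp(2*beta*e) is the Metropolis factor exp(-beta*DeltaE) for a
  // flip with DeltaE = -2*e; values >= 1 simply mean "always accept".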
for (int e = -6; e <= 6; ++e)
prob[e+6] = exp(2.0 * beta * (float) e);
} // probability
void configuration()
{
if (init == 0)
random_conf();
else
read_conf();
} // configuration
void random_conf()
{
for (int i = 0; i < N; ++i) {
spins[i] = 0;
for(int b = 0; b < N64bit; ++b) {
if (((double)rand())/((double)RAND_MAX) > 0.5)
spins[i] ^= (MyBit << b);
} // b
} // i
} // random_conf
void read_conf()
{
FILE *fptr;
fptr = fopen("spins.dat", "rt");
if (fptr == NULL) { printf("can not open spins.dat"); exit(0); }
for (int i = 0; i < N; ++i)
fscanf(fptr, "%llu", &spins[i]);
fclose(fptr);
} // read_conf
void write_conf()
{
FILE *ofptr;
int i;
ofptr = fopen("spins.dat","w");
for (i = 0; i < N; ++i)
fprintf(ofptr,"%llu\n",spins[i]);
fclose(ofptr);
} // write_conf
void lattice()
{
int L2 = L * L;
for (int z0 = 0; z0 < L; ++z0) {
for (int y0 = 0; y0 < L; ++y0) {
for (int x0 = 0; x0 < L; ++x0) {
int x1 = (x0-1+L)%L;
int x2 = (x0+1)%L;
int y1 = (y0-1+L)%L;
int y2 = (y0+1)%L;
int z1 = (z0-1+L)%L;
int z2 = (z0+1)%L;
int j = z0 * L2 + y0 * L + x0;
nnbors[j][0] = z0 * L2 + y0 * L + x1;
nnbors[j][1] = z0 * L2 + y0 * L + x2;
nnbors[j][2] = z0 * L2 + y1 * L + x0;
nnbors[j][3] = z0 * L2 + y2 * L + x0;
nnbors[j][4] = z1 * L2 + y0 * L + x0;
nnbors[j][5] = z2 * L2 + y0 * L + x0;
} // x0
} // y0
} // z0
} // lattice
void deallocate_arrays()
{
if (spins != NULL) { free(spins); spins = NULL; }
for (int i = 0; i < N; ++i)
if (nnbors[i] != NULL)
free(nnbors[i]);
if (nnbors != NULL)
free(nnbors);
nnbors = NULL;
} // deallocate_arrays
void allocate_arrays()
{
spins = (spin_t *) malloc(N * sizeof(spin_t));
nnbors = (int **) malloc(N*sizeof(int*));
for (int i = 0; i < N; ++i)
nnbors[i] = (int *) malloc(2*D*sizeof(int));
} // allocate_arrays
void set_parameters()
{
N = (int) pow(L, D);
} // set_parameters
void read_file()
{
FILE *fptr;
fptr = fopen("input.in", "rt");
if (fptr == NULL) {
printf("can not open input.in");
exit(0);
}
fscanf(fptr,"%i %i %lf", &D, &L, &T);
fscanf(fptr,"%i %i %i %i", &init, &istp, &mstp, &nbins);
fclose(fptr);
} // read_file
| 57a1d7c777aab264b302a1221ffc77affbb69308.cu | // FILE: ising3d.c
#include <string>
#include <stdio.h>
#include <math.h>
#include <stdlib.h> // Provides rand(), RAND_MAX
#include <assert.h>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>
using std::string;
#define BLOCKLx 4
#define BLOCKLy 4
#define BLOCKLz 4
#define MyBit 1ULL
#define N64bit 64
typedef unsigned long long int bit64;
typedef bit64 spin_t;
int L, D, N;
int init, istp, mstp, nbins;
double T;
double enrg, ma, m2, m4;
spin_t *spins;
int **nnbors;
float prob[13];
texture<float, 1, cudaReadModeElementType> Boltz;
__global__ void init_rand(int L, unsigned long long int seed, curandState_t *states) {
int global_x = blockIdx.x * blockDim.x + threadIdx.x;
int global_y = blockIdx.y * blockDim.y + threadIdx.y;
int global_z = blockIdx.z * blockDim.z + threadIdx.z;
int global_n = global_z * L * L + global_y * L + global_x;
curand_init(seed, global_n, global_n, &states[global_n]);
__syncthreads();
} // init_rand
__device__ float ran(curandState* global_state, int global_n) {
curandState_t local_state = global_state[global_n];
float r = curand_uniform(&local_state);
global_state[global_n] = local_state;
return r;
} // ran
__global__ void display_dims() {
if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) {
if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
printf("gDim.x : %i \n", gridDim.x);
printf("bDim.x : %i \n", blockDim.x);
}
}
__syncthreads();
} // display_dims
__global__ void mc_updates(int L, curandState* global_state, spin_t *d_spins, int offset) {
__shared__ spin_t local_spins[(BLOCKLx+2)*(BLOCKLy+2)*(BLOCKLz+2)];
int global_x, global_y, global_z, global_n;
int nn_global_x, nn_global_y, nn_global_z, nn_global_n;
int local_x, local_y, local_z, local_n;
spin_t sj, ss0, ss1, ss2, ss3, ss4, ss5, mask;
int xm, xp, ym, yp, zm, zp, L2 = L*L, b, dE;
global_x = blockIdx.x * blockDim.x + threadIdx.x;
global_y = blockIdx.y * blockDim.y + threadIdx.y;
global_z = blockIdx.z * blockDim.z + threadIdx.z;
global_n = global_z * L2 + global_y * L + global_x;
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[global_n];
if (threadIdx.x == 0) {
nn_global_x = ((blockIdx.x-1+gridDim.x)%gridDim.x)*BLOCKLx + BLOCKLx-1;
nn_global_y = global_y;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = 0;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.x == BLOCKLx-1) {
nn_global_x = ((blockIdx.x+1)%gridDim.x)*BLOCKLx;
nn_global_y = global_y;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = BLOCKLx+1;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.y == 0) {
nn_global_x = global_x;
nn_global_y = ((blockIdx.y-1+gridDim.y)%gridDim.y)*BLOCKLy + BLOCKLy-1;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = 0;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.y == BLOCKLy-1) {
nn_global_x = global_x;
nn_global_y = ((blockIdx.y+1)%gridDim.y)*BLOCKLy;
nn_global_z = global_z;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = BLOCKLy+1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.z == 0) {
nn_global_x = global_x;
nn_global_y = global_y;
nn_global_z = ((blockIdx.z-1+gridDim.z)%gridDim.z)*BLOCKLz + BLOCKLz-1;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = 0;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
if (threadIdx.z == BLOCKLz-1) {
nn_global_x = global_x;
nn_global_y = global_y;
nn_global_z = ((blockIdx.z+1)%gridDim.z)*BLOCKLz;
nn_global_n = nn_global_z * L2 + nn_global_y * L + nn_global_x;
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = BLOCKLz+1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
local_spins[local_n] = d_spins[nn_global_n];
}
__syncthreads();
local_x = threadIdx.x + 1;
local_y = threadIdx.y + 1;
local_z = threadIdx.z + 1;
local_n = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
sj = local_spins[local_n];
if ( (threadIdx.x + threadIdx.y + threadIdx.z + offset)%2 == 0 ) {
xm = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x-1;
xp = local_z * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x+1;
ym = local_z * (BLOCKLx+2)*(BLOCKLy+2) + (local_y-1) * (BLOCKLx+2) + local_x;
yp = local_z * (BLOCKLx+2)*(BLOCKLy+2) + (local_y+1) * (BLOCKLx+2) + local_x;
zm = (local_z-1) * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
zp = (local_z+1) * (BLOCKLx+2)*(BLOCKLy+2) + local_y * (BLOCKLx+2) + local_x;
ss0 = sj ^ local_spins[xm];
ss1 = sj ^ local_spins[xp];
ss2 = sj ^ local_spins[ym];
ss3 = sj ^ local_spins[yp];
ss4 = sj ^ local_spins[zm];
ss5 = sj ^ local_spins[zp];
for (b = 0; b < N64bit; ++b) {
dE = 0;
// dE <--> 2 \sigma^B_i XOR \sigma^B_j - 1
mask = (MyBit << b);
dE += (ss0 & mask) ? 1 : -1;
dE += (ss1 & mask) ? 1 : -1;
dE += (ss2 & mask) ? 1 : -1;
dE += (ss3 & mask) ? 1 : -1;
dE += (ss4 & mask) ? 1 : -1;
dE += (ss5 & mask) ? 1 : -1;
if ( ran(global_state, global_n) < tex1Dfetch(Boltz,dE+6) )
sj ^= mask;
} // b
local_spins[local_n] = sj;
d_spins[global_n] = local_spins[local_n];
} // end of "if (Idx.x + Idx.y + Idx.z + offset)%2 == 0"
__syncthreads();
} // mc_updates
// ========================================================================== //
void initialize();
void read_file();
void set_parameters();
void allocate_arrays();
void lattice();
void configuration();
void random_conf();
void read_conf();
void write_conf();
void probability();
void deallocate_arrays();
void clean();
void measure();
void write_data();
// ========================================================================== //
int main(int argc, char* argv[]) {
curandState_t *devStates;
spin_t *dev_spins;
float *dev_prob;
srand(time(NULL));
initialize();
dim3 block(BLOCKLx, BLOCKLy, BLOCKLz);
dim3 grid(L/BLOCKLx, L/BLOCKLy, L/BLOCKLz);
//display_dims<<<grid, block>>>();
cudaMalloc((void **)&dev_spins, N*sizeof(spin_t));
cudaMalloc((void **)&devStates, N*sizeof(curandState_t));
init_rand<<<grid, block>>>(L, rand(), devStates);
cudaMalloc((void **) &dev_prob, (4*D+1) * sizeof(float));
cudaMemcpy(dev_prob, prob, (4*D+1)*sizeof(float), cudaMemcpyHostToDevice);
cudaBindTexture(NULL, Boltz, dev_prob, (4*D+1)*sizeof(float));
cudaMemcpy(dev_spins, spins, N*sizeof(spin_t), cudaMemcpyHostToDevice);
for (int i = 0; i < istp; ++i) {
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 0);
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 1);
}
cudaMemcpy(spins, dev_spins, N*sizeof(spin_t), cudaMemcpyDeviceToHost);
write_conf();
for (int k = 0; k < nbins; ++k) {
clean();
for (int i = 0; i < mstp; ++i) {
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 0);
mc_updates<<<grid, block>>>(L, devStates, dev_spins, 1);
cudaMemcpy(spins, dev_spins, N*sizeof(spin_t), cudaMemcpyDeviceToHost);
measure();
} // i-mstp
write_data();
write_conf();
} // k-bin
if (dev_prob != NULL) { cudaFree(dev_prob); dev_prob = NULL; }
if (devStates != NULL) { cudaFree(devStates); devStates = NULL; }
if (dev_spins != NULL) { cudaFree(dev_spins); dev_spins = NULL; }
deallocate_arrays();
return 0;
} // main
void write_data()
{
FILE *ofptr;
double dmstp = (double) mstp;
enrg /= dmstp;
ma /= dmstp;
m2 /= dmstp;
m4 /= dmstp;
ofptr = fopen("data.dat","a");
fprintf(ofptr,"%12.8f %12.8f %12.8f %12.8f \n", enrg, ma, m2, m4);
fclose(ofptr);
} // write_data
void clean()
{
enrg = ma = m2 = m4 = 0.0e0;
} // clean
void measure() {
int E = 0, j, b;
int m[N64bit];
bit64 mask, ss1, ss3, ss5;
double dN = (double) N, dm , local_ma, local_m2, local_m4, d64 = (double) N64bit;
for (b = 0; b < N64bit; ++b) m[b] = 0;
for (j = 0; j < N; ++j) {
ss1 = spins[j] ^ spins[ nnbors[j][1] ];
ss3 = spins[j] ^ spins[ nnbors[j][3] ];
ss5 = spins[j] ^ spins[ nnbors[j][5] ];
for (b = 0; b < N64bit; ++b) {
mask = (MyBit << b);
m[b] += ( (spins[j] & mask) ? 1 : -1 );
E += ( (ss1 & mask) ? -1 : 1 );
E += ( (ss3 & mask) ? -1 : 1 );
E += ( (ss5 & mask) ? -1 : 1 );
} // b
} // j
enrg += (-(double) E)/(dN * (double) N64bit);
local_ma = local_m2 = local_m4 = 0.0;
for (b = 0; b < N64bit; ++b) {
dm = (double) m[b]/dN;
local_ma += fabs(dm);
local_m2 += pow(dm,2.0);
local_m4 += pow(dm,4.0);
} //b
ma += local_ma/d64;
m2 += local_m2/d64;
m4 += local_m4/d64;
} // measure
void initialize()
{
// 1) read-in input parameters
read_file();
// 2) set simulation parameters
set_parameters();
// 3) allocate arrays
allocate_arrays();
// 4) generate the 2D lattice
lattice();
// 5) generate the initial configuration
configuration();
// 6) construct the probbility table
probability();
} // initialize
void probability()
{
float beta = 1.0e0/T;
// e <--> 2 \sigma^B_i XOR \sigma^B_j - 1
for (int e = -6; e <= 6; ++e)
prob[e+6] = exp(2.0 * beta * (float) e);
} // probability
void configuration()
{
if (init == 0)
random_conf();
else
read_conf();
} // configuration
void random_conf()
{
for (int i = 0; i < N; ++i) {
spins[i] = 0;
for(int b = 0; b < N64bit; ++b) {
if (((double)rand())/((double)RAND_MAX) > 0.5)
spins[i] ^= (MyBit << b);
} // b
} // i
} // random_conf
void read_conf()
{
FILE *fptr;
fptr = fopen("spins.dat", "rt");
if (fptr == NULL) { printf("can not open spins.dat"); exit(0); }
for (int i = 0; i < N; ++i)
fscanf(fptr, "%llu", &spins[i]);
fclose(fptr);
} // read_conf
void write_conf()
{
FILE *ofptr;
int i;
ofptr = fopen("spins.dat","w");
for (i = 0; i < N; ++i)
fprintf(ofptr,"%llu\n",spins[i]);
fclose(ofptr);
} // write_conf
void lattice()
{
int L2 = L * L;
for (int z0 = 0; z0 < L; ++z0) {
for (int y0 = 0; y0 < L; ++y0) {
for (int x0 = 0; x0 < L; ++x0) {
int x1 = (x0-1+L)%L;
int x2 = (x0+1)%L;
int y1 = (y0-1+L)%L;
int y2 = (y0+1)%L;
int z1 = (z0-1+L)%L;
int z2 = (z0+1)%L;
int j = z0 * L2 + y0 * L + x0;
nnbors[j][0] = z0 * L2 + y0 * L + x1;
nnbors[j][1] = z0 * L2 + y0 * L + x2;
nnbors[j][2] = z0 * L2 + y1 * L + x0;
nnbors[j][3] = z0 * L2 + y2 * L + x0;
nnbors[j][4] = z1 * L2 + y0 * L + x0;
nnbors[j][5] = z2 * L2 + y0 * L + x0;
} // x0
} // y0
} // z0
} // lattice
void deallocate_arrays()
{
if (spins != NULL) { free(spins); spins = NULL; }
for (int i = 0; i < N; ++i)
if (nnbors[i] != NULL)
free(nnbors[i]);
if (nnbors != NULL)
free(nnbors);
nnbors = NULL;
} // deallocate_arrays
void allocate_arrays()
{
spins = (spin_t *) malloc(N * sizeof(spin_t));
nnbors = (int **) malloc(N*sizeof(int*));
for (int i = 0; i < N; ++i)
nnbors[i] = (int *) malloc(2*D*sizeof(int));
} // allocate_arrays
void set_parameters()
{
N = (int) pow(L, D);
} // set_parameters
void read_file()
{
FILE *fptr;
fptr = fopen("input.in", "rt");
if (fptr == NULL) {
printf("can not open input.in");
exit(0);
}
fscanf(fptr,"%i %i %lf", &D, &L, &T);
fscanf(fptr,"%i %i %i %i", &init, &istp, &mstp, &nbins);
fclose(fptr);
} // read_file
|
a1fe23bc71a33442a92e8b80da85735d563e779d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
 Description : Segmented sort of unsigned keys via a single device-wide radix sort (segment ids are packed into the high bits of each key and stripped again after sorting)
============================================================================
*/
#include <hipcub/hipcub.hpp>
#include <iostream>
typedef unsigned int uint;
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 512
#endif
#ifndef EXECUTIONS
#define EXECUTIONS 1
#endif
template <typename T>
struct Plus {
__host__ __device__
T operator()(const T x, const T y)
{
return x + y;
}
};
template <typename T>
struct Minus {
__host__ __device__
T operator()(const T x, const T y)
{
return x - y;
}
};
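/*
 * Segmented-sort strategy: the maximum key tells how many low bits the data
 * occupies; adjustment<Plus> shifts each element's segment id into the bits
 * above that, one device-wide radix sort then orders all segments at once,
 * and adjustment<Minus> strips the segment prefix again afterwards.
 */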
template<typename Op>
__global__ void adjustment(uint* d_vec, uint* d_seg, uint num_of_elements, uint* d_max ){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_elements) {
uint mostSignificantBit = (uint)log2((double)*d_max) + 1;
uint segIndex = d_seg[id] << mostSignificantBit;
Op op = Op();
d_vec[id] = op(d_vec[id], segIndex);
}
}
void cudaTest(hipError_t error) {
if (error != hipSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
exit (EXIT_FAILURE);
}
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("1: Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("1: Async kernel error: %s\n", hipGetErrorString(errAsync));
}
void print(uint* host_data, uint n) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
std::cout << host_data[i] << " ";
}
std::cout << "\n";
}
int main(void) {
uint num_of_segments;
uint num_of_elements;
uint i;
scanf("%d", &num_of_segments);
uint mem_size_seg = sizeof(uint) * (num_of_segments + 1);
uint *h_seg_aux = (uint *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg_aux[i]);
scanf("%d", &num_of_elements);
int mem_size_vec = sizeof(uint) * num_of_elements;
uint *h_vec = (uint *) malloc(mem_size_vec);
uint *h_value = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++) {
scanf("%d", &h_vec[i]);
h_value[i] = i;
}
uint *h_seg = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_segments; i++) {
for (uint j = h_seg_aux[i]; j < h_seg_aux[i + 1]; j++) {
h_seg[j] = i;
}
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
uint *d_value, *d_value_out, *d_vec, *d_vec_out, *d_max, *d_seg;
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
uint* max_val = (uint *) malloc(sizeof(uint));
cudaTest(hipMalloc((void **) &d_max, sizeof(uint)));
cudaTest(hipMalloc((void **) &d_vec, mem_size_vec));
cudaTest(hipMalloc((void **) &d_seg, mem_size_vec));
cudaTest(hipMalloc((void **) &d_value, mem_size_vec));
cudaTest(hipMalloc((void **) &d_vec_out, mem_size_vec));
cudaTest(hipMalloc((void **) &d_value_out, mem_size_vec));
cudaTest(hipMemcpy(d_value, h_value, mem_size_vec, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_seg, h_seg, mem_size_vec, hipMemcpyHostToDevice));
void *d_temp = NULL;
size_t temp_bytes = 0;
int grid = ((num_of_elements-1)/BLOCK_SIZE) + 1;
float averageExecutions = 0;
for (uint i = 0; i < EXECUTIONS; i++) {
cudaTest(hipMemcpy(d_vec, h_vec, mem_size_vec, hipMemcpyHostToDevice));
/*
* maximum element of the array.
*/
hipEventRecord(start);
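    // hipCUB/CUB convention: the first call with d_temp_storage == NULL only
    // computes the required temp_storage_bytes; the second call does the work.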
hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements);
hipMalloc(&d_temp_storage, temp_storage_bytes); // Allocate temporary storage
hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements); // Run max-reduction
/*
* add prefix to the elements
*/
hipLaunchKernelGGL(( adjustment<Plus<uint>>) , dim3(grid), dim3(BLOCK_SIZE), 0, 0, d_vec, d_seg, num_of_elements, d_max);
/*
* sort the vector
*/
hipcub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out,
d_value, d_value_out, num_of_elements);
hipMalloc((void **) &d_temp, temp_bytes);
hipcub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out,
d_value, d_value_out, num_of_elements);
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("4: Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("4: Async kernel error: %s\n", hipGetErrorString(errAsync));
hipLaunchKernelGGL(( adjustment<Minus<uint>>) , dim3(grid), dim3(BLOCK_SIZE), 0, 0, d_vec_out, d_seg, num_of_elements, d_max);
hipEventRecord(stop);
hipEventSynchronize(stop);
if (ELAPSED_TIME == 1) {
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
averageExecutions += milliseconds;
//std::cout << milliseconds << "\n";
}
hipFree(d_temp_storage);
temp_storage_bytes = 0;
d_temp_storage = NULL;
hipFree(d_temp);
temp_bytes = 0;
d_temp = NULL;
hipDeviceSynchronize();
}
hipMemcpy(h_vec, d_vec_out, mem_size_vec, hipMemcpyDeviceToHost);
hipFree(d_max);
hipFree(d_seg);
hipFree(d_vec);
hipFree(d_vec_out);
hipFree(d_value);
hipFree(d_value_out);
if (ELAPSED_TIME != 1) {
print(h_vec, num_of_elements);
}
else { std::cout << averageExecutions/EXECUTIONS << "\n"; }
free(h_seg_aux);
free(h_seg);
free(h_vec);
free(h_value);
return 0;
}
| a1fe23bc71a33442a92e8b80da85735d563e779d.cu | /*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
#include <cub/util_allocator.cuh>
#include <cub/device/device_radix_sort.cuh>
#include <cub/device/device_reduce.cuh>
#include <iostream>
typedef unsigned int uint;
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 512
#endif
#ifndef EXECUTIONS
#define EXECUTIONS 1
#endif
template <typename T>
struct Plus {
__host__ __device__
T operator()(const T x, const T y)
{
return x + y;
}
};
template <typename T>
struct Minus {
__host__ __device__
T operator()(const T x, const T y)
{
return x - y;
}
};
template<typename Op>
__global__ void adjustment(uint* d_vec, uint* d_seg, uint num_of_elements, uint* d_max ){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_elements) {
uint mostSignificantBit = (uint)log2((double)*d_max) + 1;
uint segIndex = d_seg[id] << mostSignificantBit;
Op op = Op();
d_vec[id] = op(d_vec[id], segIndex);
}
}
void cudaTest(cudaError_t error) {
if (error != cudaSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
exit (EXIT_FAILURE);
}
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("1: Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("1: Async kernel error: %s\n", cudaGetErrorString(errAsync));
}
void print(uint* host_data, uint n) {
std::cout << "\n";
for (uint i = 0; i < n; i++) {
std::cout << host_data[i] << " ";
}
std::cout << "\n";
}
int main(void) {
uint num_of_segments;
uint num_of_elements;
uint i;
scanf("%d", &num_of_segments);
uint mem_size_seg = sizeof(uint) * (num_of_segments + 1);
uint *h_seg_aux = (uint *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg_aux[i]);
scanf("%d", &num_of_elements);
int mem_size_vec = sizeof(uint) * num_of_elements;
uint *h_vec = (uint *) malloc(mem_size_vec);
uint *h_value = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++) {
scanf("%d", &h_vec[i]);
h_value[i] = i;
}
uint *h_seg = (uint *) malloc(mem_size_vec);
for (i = 0; i < num_of_segments; i++) {
for (uint j = h_seg_aux[i]; j < h_seg_aux[i + 1]; j++) {
h_seg[j] = i;
}
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
uint *d_value, *d_value_out, *d_vec, *d_vec_out, *d_max, *d_seg;
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
uint* max_val = (uint *) malloc(sizeof(uint));
cudaTest(cudaMalloc((void **) &d_max, sizeof(uint)));
cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_seg, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_value, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_vec_out, mem_size_vec));
cudaTest(cudaMalloc((void **) &d_value_out, mem_size_vec));
cudaTest(cudaMemcpy(d_value, h_value, mem_size_vec, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_seg, h_seg, mem_size_vec, cudaMemcpyHostToDevice));
void *d_temp = NULL;
size_t temp_bytes = 0;
int grid = ((num_of_elements-1)/BLOCK_SIZE) + 1;
float averageExecutions = 0;
for (uint i = 0; i < EXECUTIONS; i++) {
cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice));
/*
* maximum element of the array.
*/
cudaEventRecord(start);
cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements);
cudaMalloc(&d_temp_storage, temp_storage_bytes); // Allocate temporary storage
cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_vec, d_max, num_of_elements); // Run max-reduction
/*
* add prefix to the elements
*/
adjustment<Plus<uint>> <<< grid, BLOCK_SIZE>>>(d_vec, d_seg, num_of_elements, d_max);
/*
* sort the vector
*/
cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out,
d_value, d_value_out, num_of_elements);
cudaMalloc((void **) &d_temp, temp_bytes);
cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out,
d_value, d_value_out, num_of_elements);
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("4: Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("4: Async kernel error: %s\n", cudaGetErrorString(errAsync));
adjustment<Minus<uint>> <<< grid, BLOCK_SIZE>>>(d_vec_out, d_seg, num_of_elements, d_max);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
if (ELAPSED_TIME == 1) {
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
averageExecutions += milliseconds;
//std::cout << milliseconds << "\n";
}
cudaFree(d_temp_storage);
temp_storage_bytes = 0;
d_temp_storage = NULL;
cudaFree(d_temp);
temp_bytes = 0;
d_temp = NULL;
cudaDeviceSynchronize();
}
cudaMemcpy(h_vec, d_vec_out, mem_size_vec, cudaMemcpyDeviceToHost);
cudaFree(d_max);
cudaFree(d_seg);
cudaFree(d_vec);
cudaFree(d_vec_out);
cudaFree(d_value);
cudaFree(d_value_out);
if (ELAPSED_TIME != 1) {
print(h_vec, num_of_elements);
}
else { std::cout << averageExecutions/EXECUTIONS << "\n"; }
free(h_seg_aux);
free(h_seg);
free(h_vec);
free(h_value);
return 0;
}
|
38381387e58c49ac1502e551aa9047ac435a9ce0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h> // exp
/* Data structures */
struct YieldCurveData {
double P; // Discount Factor Function
double t; // Time [days]
};
struct OptionData {
double strike;
double maturity;
int number_of_terms;
double reversion_rate;
double volatility;
};
/* Probability measures for tree construction */
// Exhibit 1A (-jmax < j < jmax)
#define PU_A(j, M) ((1.0/6.0) + (((double)(j*j))*M*M + ((double)j)*M) * (1.0/2.0))
#define PM_A(j, M) ((2.0/3.0) - ((double)(j*j))*M*M)
#define PD_A(j, M) ((1.0/6.0) + (((double)(j*j))*M*M - ((double)j)*M) * (1.0/2.0))
// Exhibit 1B (j == -jmax)
#define PU_B(j, M) ( (1.0/6.0) + (((double)(j*j))*M*M - ((double)j)*M) * (1.0/2.0))
#define PM_B(j, M) (-(1.0/3.0) - (((double)(j*j))*M*M) - (2*((double)j)*M))
#define PD_B(j, M) ( (7.0/6.0) + (((double)(j*j))*M*M - (3*(double)j)*M) * (1.0/2.0))
// Exhibit 1C (j == jmax)
#define PU_C(j, M) ( (7.0/6.0) + (((double)(j*j))*M*M + (3*(double)j)*M) * (1.0/2.0))
#define PM_C(j, M) (-(1.0/3.0) - (((double)(j*j))*M*M) - (2*((double)j)*M))
#define PD_C(j, M) ( (1.0/6.0) + (((double)(j*j))*M*M + ((double)j)*M) * (1.0/2.0))
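// The three cases correspond to the node types of a Hull-White-style trinomial tree:
// 1A for interior nodes, 1B for the bottom edge (j == -jmax), 1C for the top edge (j == jmax).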
/* forward propagation helper */
double forward_helper(double M, double dr, double dt, double alphai,
double *QCopy, int beg_ind, int m, int i,
int imax, int jmax, int j)
{
double eRdt_u1 = exp(-((double)(j+1)*dr+alphai)*dt);
double eRdt = exp(-((double)(j) *dr+alphai)*dt);
double eRdt_d1 = exp(-((double)(j-1)*dr+alphai)*dt);
double res;
double pu, pm, pd;
if (i < jmax) {
pu = PU_A(j-1, M);
pm = PM_A(j, M);
pd = PD_A(j+1, M);
if((i == 0) && (j == 0)) {
res = pm*QCopy[beg_ind+j+m]*eRdt;
} else if(j == (-imax + 1)) {
res = pd*QCopy[beg_ind+j+m+1]*eRdt_u1 + pm*QCopy[beg_ind+j+m]*eRdt;
} else if(j == ( imax - 1)) {
res = pm*QCopy[beg_ind+j+m]*eRdt + pu*QCopy[beg_ind+j+m-1]*eRdt_d1;
} else if(j == (-imax)) {
res = pd*QCopy[beg_ind+j+m+1]*eRdt_u1;
} else if(j == ( imax)) {
res = pu*QCopy[beg_ind+j+m-1]*eRdt_d1;
} else {
res = // return
pd*QCopy[beg_ind+j+m+1]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j+m-1]*eRdt_d1;
}
} // END_OF: (i < jmax) {
else if(j == jmax) {
pm = PU_C(j, M);
pu = PU_A(j-1, M);
res = pm*QCopy[beg_ind+j+m]*eRdt + pu*QCopy[beg_ind+j-1+m] * eRdt_d1; // return
} // END_OF: (j == jmax)
else if(j == (jmax - 1)) {
pd = PM_C(j+1, M);
pm = PM_A(j , M);
pu = PU_A(j-1, M);
res =
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1;
} // END_OF: (j == (jmax - 1))
else if(j == (jmax - 2)) {
double eRdt_u2 = exp(-(((double)(j+2))*dr + alphai) * dt);
double pd_c = PD_C(j + 2, M);
pd = PD_A(j + 1, M);
pm = PM_A(j, M);
pu = PU_A(j - 1, M);
res =
pd_c*QCopy[beg_ind+j+2+m]*eRdt_u2 +
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1;
} // END_OF: (jmax - 2))
else if(j == (-jmax + 2)) {
double eRdt_d2 = exp(-(((double)(j-2))*dr + alphai) * dt);
double pu_b = PU_B(j - 2, M);
pd = PD_A(j + 1, M);
pm = PM_A(j, M);
pu = PU_A(j - 1, M);
res =
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1 +
pu_b*QCopy[beg_ind+j-2+m]*eRdt_d2;
} // END_OF: (j == (-jmax + 2))
else if(j == (-jmax + 1)) {
pd = PD_A(j + 1, M);
pm = PM_A(j, M);
pu = PM_B(j - 1, M);
res =
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1;
} // END_OF: (j == (-jmax + 1))
else if(j == (-jmax)) {
pd = PD_A(j + 1, M);
pm = PD_B(j, M);
res = pd*QCopy[beg_ind+j+1+m]*eRdt_u1 + pm*QCopy[beg_ind+j+m]*eRdt;
} // END_OF: (-jmax)
else {
pd = PD_A(j + 1, M);
pm = PM_A(j, M);
pu = PU_A(j - 1, M);
res =
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1;
} // END_OF: default
return res;
}
/* backward propagation helper */
double backward_helper(double X, double M, double dr, double dt,
double alphai, double *CallCopy, int beg_ind, int m,
int i, int jmax, int j)
{
double eRdt = exp(-((double)(j)*dr + alphai)*dt);
double res;
double pu, pm, pd;
// define res in big if-statement
if (i < jmax) {
// central node
pu = PU_A(j, M);
pm = PM_A(j, M);
pd = PD_A(j, M);
res = (pu*CallCopy[beg_ind+j+m+1] +
pm*CallCopy[beg_ind+j+m] +
pd*CallCopy[beg_ind+j+m-1]) *
eRdt;
} else if(j == jmax) {
// top node
pu = PU_C(j, M);
pm = PM_C(j, M);
pd = PD_C(j, M);
res = (pu*CallCopy[beg_ind+j+m] +
pm*CallCopy[beg_ind+j+m-1] +
pd*CallCopy[beg_ind+j+m-2]) *
eRdt;
} else if(j == -jmax) {
// bottom node
pu = PU_B(j, M);
pm = PM_B(j, M);
pd = PD_B(j, M);
res = (pu*CallCopy[beg_ind+j+m+2] +
pm*CallCopy[beg_ind+j+m+1] +
pd*CallCopy[beg_ind+j+m]) *
eRdt;
} else {
// central node
pu = PU_A(j, M);
pm = PM_A(j, M);
pd = PD_A(j, M);
res = (pu*CallCopy[beg_ind+j+m+1] +
pm*CallCopy[beg_ind+j+m] +
pd*CallCopy[beg_ind+j+m-1]) *
eRdt;
}
  // The 3 is the length of the contract in years. Maybe parameterize it?
  if(i == ((int)(3 / dt))) { res = fmax(X - res, 0.0); }
return res;
}
// START: Cosmin's
// int option_chunk_x = options_in_chunk[blockIdx.x];
// int *option_in_chunk_x = option_indices + blockIdx.x * max_options_in_chunk;
// double X = strikes[options_in_chunk[i]];
/* trinomial chunk kernel */
__global__ void trinom_chunk_kernel(double yield_curve,
double *strikes,
double *maturities,
double *reversion_rates,
double *volatilities,
int *num_termss,
int n_max, // maximum number of time steps
int *options_in_chunk, // size: [number_of_blocks]
int *option_indices, // size: [number_of_blocks][maxOptionsInChunk]
int max_options_in_chunk)
{
// computing option id
unsigned int lid = threadIdx.x;
  int option_chunk = options_in_chunk[blockIdx.x];
  int *option_ids_in_chunk = option_indices + blockIdx.x * max_options_in_chunk;
  int option_id = option_ids_in_chunk[lid];
// computing option specific values
double X = strikes[option_id];
double T = maturities[option_id];
int n = num_termss[option_id];
double a = reversion_rates[option_id];
double sigma = volatilities[option_id];
double dt = T / ((double) n);
double V = sigma * sigma * (1 - (exp(0.0 - 2.0 * a * dt)) ) / (2.0 * a);
double dr = sqrt((1.0 + 2.0) * V);
double M = exp(0.0 - a * dt) - 1.0;
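  // jmax rule: Hull and White suggest the smallest integer greater than 0.184/(a*dt);
  // since M = exp(-a*dt) - 1 is approximately -a*dt, -0.184/M approximates that bound.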
double jmax = ((int)(-0.184 / M)) + 1;
int m = jmax + 2;
if(lid < sum_of_qlens_in_block) { // maybe some guard here
// 1. forward iteration
for(int i=0; i<n_max; i++) {
if() { // guard because of __synthreads
}
// barrier because of dependency between q_{i} and q_{i+1}
__syncthreads();
}
// 2. backward iteration
for(int i=0; i<n_max; i++) {
if() { // guard because of __synthreads
}
// barrier because of dependency between c_{i-1} and c_{i}
__syncthreads();
}
} // END: lid < sum_of_qlens_in_block
}
int main()
{
// small.in - should be read from file
  double strike[1] = { 0.7965300572556244 }; // long double?
  double maturity[1] = { 9.0000 };
  int num_terms[1] = { 108 };
  double reversion_rate[1] = { 0.1000 };
  double volatility[1] = { 0.0100 };
// set maximum chunk size
int w = 256;
// start out with: (assuming that all options are equal)
// n_max := options[0].n
// m_max := options[0].m
// compute: chunks
  // each thread should know where to read from when data is
// in global device memory
// copy data from host to device
// compute block and grid dimensions
// - block: (1, 1, w)
// - grid: (1, ceil(sum(Qlen) / w)
// execute kernel
// copy data from device to host
return 0;
}
| 38381387e58c49ac1502e551aa9047ac435a9ce0.cu | #include <math.h> // exp
/* Data structures */
struct YieldCurveData {
double P; // Discount Factor Function
double t; // Time [days]
};
struct OptionData {
double strike;
double maturity;
int number_of_terms;
double reversion_rate;
double volatility;
};
/* Probability measures for tree construction */
// Exhibit 1A (-jmax < j < jmax)
#define PU_A(j, M) ((1.0/6.0) + (((double)(j*j))*M*M + ((double)j)*M) * (1.0/2.0))
#define PM_A(j, M) ((2.0/3.0) - ((double)(j*j))*M*M)
#define PD_A(j, M) ((1.0/6.0) + (((double)(j*j))*M*M - ((double)j)*M) * (1.0/2.0))
// Exhibit 1B (j == -jmax)
#define PU_B(j, M) ( (1.0/6.0) + (((double)(j*j))*M*M - ((double)j)*M) * (1.0/2.0))
#define PM_B(j, M) (-(1.0/3.0) - (((double)(j*j))*M*M) - (2*((double)j)*M))
#define PD_B(j, M) ( (7.0/6.0) + (((double)(j*j))*M*M - (3*(double)j)*M) * (1.0/2.0))
// Exhibit 1C (j == jmax)
#define PU_C(j, M) ( (7.0/6.0) + (((double)(j*j))*M*M + (3*(double)j)*M) * (1.0/2.0))
#define PM_C(j, M) (-(1.0/3.0) - (((double)(j*j))*M*M) - (2*((double)j)*M))
#define PD_C(j, M) ( (1.0/6.0) + (((double)(j*j))*M*M + ((double)j)*M) * (1.0/2.0))
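// The three cases correspond to the node types of a Hull-White-style trinomial tree:
// 1A for interior nodes, 1B for the bottom edge (j == -jmax), 1C for the top edge (j == jmax).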
/* forward propagation helper */
double forward_helper(double M, double dr, double dt, double alphai,
double *QCopy, int beg_ind, int m, int i,
int imax, int jmax, int j)
{
double eRdt_u1 = exp(-((double)(j+1)*dr+alphai)*dt);
double eRdt = exp(-((double)(j) *dr+alphai)*dt);
double eRdt_d1 = exp(-((double)(j-1)*dr+alphai)*dt);
double res;
double pu, pm, pd;
if (i < jmax) {
pu = PU_A(j-1, M);
pm = PM_A(j, M);
pd = PD_A(j+1, M);
if((i == 0) && (j == 0)) {
res = pm*QCopy[beg_ind+j+m]*eRdt;
} else if(j == (-imax + 1)) {
res = pd*QCopy[beg_ind+j+m+1]*eRdt_u1 + pm*QCopy[beg_ind+j+m]*eRdt;
} else if(j == ( imax - 1)) {
res = pm*QCopy[beg_ind+j+m]*eRdt + pu*QCopy[beg_ind+j+m-1]*eRdt_d1;
} else if(j == (-imax)) {
res = pd*QCopy[beg_ind+j+m+1]*eRdt_u1;
} else if(j == ( imax)) {
res = pu*QCopy[beg_ind+j+m-1]*eRdt_d1;
} else {
res = // return
pd*QCopy[beg_ind+j+m+1]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j+m-1]*eRdt_d1;
}
} // END_OF: (i < jmax) {
else if(j == jmax) {
pm = PU_C(j, M);
pu = PU_A(j-1, M);
res = pm*QCopy[beg_ind+j+m]*eRdt + pu*QCopy[beg_ind+j-1+m] * eRdt_d1; // return
} // END_OF: (j == jmax)
else if(j == (jmax - 1)) {
pd = PM_C(j+1, M);
pm = PM_A(j , M);
pu = PU_A(j-1, M);
res =
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1;
} // END_OF: (j == (jmax - 1))
else if(j == (jmax - 2)) {
double eRdt_u2 = exp(-(((double)(j+2))*dr + alphai) * dt);
double pd_c = PD_C(j + 2, M);
pd = PD_A(j + 1, M);
pm = PM_A(j, M);
pu = PU_A(j - 1, M);
res =
pd_c*QCopy[beg_ind+j+2+m]*eRdt_u2 +
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1;
} // END_OF: (jmax - 2))
else if(j == (-jmax + 2)) {
double eRdt_d2 = exp(-(((double)(j-2))*dr + alphai) * dt);
double pu_b = PU_B(j - 2, M);
pd = PD_A(j + 1, M);
pm = PM_A(j, M);
pu = PU_A(j - 1, M);
res =
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1 +
pu_b*QCopy[beg_ind+j-2+m]*eRdt_d2;
} // END_OF: (j == (-jmax + 2))
else if(j == (-jmax + 1)) {
pd = PD_A(j + 1, M);
pm = PM_A(j, M);
pu = PM_B(j - 1, M);
res =
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1;
} // END_OF: (j == (-jmax + 1))
else if(j == (-jmax)) {
pd = PD_A(j + 1, M);
pm = PD_B(j, M);
res = pd*QCopy[beg_ind+j+1+m]*eRdt_u1 + pm*QCopy[beg_ind+j+m]*eRdt;
} // END_OF: (-jmax)
else {
pd = PD_A(j + 1, M);
pm = PM_A(j, M);
pu = PU_A(j - 1, M);
res =
pd*QCopy[beg_ind+j+1+m]*eRdt_u1 +
pm*QCopy[beg_ind+j+m]*eRdt +
pu*QCopy[beg_ind+j-1+m]*eRdt_d1;
} // END_OF: default
return res;
}
/* backward propagation helper */
double backward_helper(double X, double M, double dr, double dt,
double alphai, double *CallCopy, int beg_ind, int m,
int i, int jmax, int j)
{
double eRdt = exp(-((double)(j)*dr + alphai)*dt);
double res;
double pu, pm, pd;
// define res in big if-statement
if (i < jmax) {
// central node
pu = PU_A(j, M);
pm = PM_A(j, M);
pd = PD_A(j, M);
res = (pu*CallCopy[beg_ind+j+m+1] +
pm*CallCopy[beg_ind+j+m] +
pd*CallCopy[beg_ind+j+m-1]) *
eRdt;
} else if(j == jmax) {
// top node
pu = PU_C(j, M);
pm = PM_C(j, M);
pd = PD_C(j, M);
res = (pu*CallCopy[beg_ind+j+m] +
pm*CallCopy[beg_ind+j+m-1] +
pd*CallCopy[beg_ind+j+m-2]) *
eRdt;
} else if(j == -jmax) {
// bottom node
pu = PU_B(j, M);
pm = PM_B(j, M);
pd = PD_B(j, M);
res = (pu*CallCopy[beg_ind+j+m+2] +
pm*CallCopy[beg_ind+j+m+1] +
pd*CallCopy[beg_ind+j+m]) *
eRdt;
} else {
// central node
pu = PU_A(j, M);
pm = PM_A(j, M);
pd = PD_A(j, M);
res = (pu*CallCopy[beg_ind+j+m+1] +
pm*CallCopy[beg_ind+j+m] +
pd*CallCopy[beg_ind+j+m-1]) *
eRdt;
}
  // The 3 is the length of the contract in years. Maybe parameterize it?
  if(i == ((int)(3 / dt))) { res = fmax(X - res, 0.0); }
return res;
}
// START: Cosmin's
// int option_chunk_x = options_in_chunk[blockIdx.x];
// int *option_in_chunk_x = option_indices + blockIdx.x * max_options_in_chunk;
// double X = strikes[options_in_chunk[i]];
/* trinomial chunk kernel */
__global__ void trinom_chunk_kernel(double yield_curve,
double *strikes,
double *maturities,
double *reversion_rates,
double *volatilities,
int *num_termss,
int n_max, // maximum number of time steps
int *options_in_chunk, // size: [number_of_blocks]
int *option_indices, // size: [number_of_blocks][maxOptionsInChunk]
int max_options_in_chunk)
{
// computing option id
unsigned int lid = threadIdx.x;
  int option_chunk = options_in_chunk[blockIdx.x];
  int *option_ids_in_chunk = option_indices + blockIdx.x * max_options_in_chunk;
  int option_id = option_ids_in_chunk[lid];
// computing option specific values
double X = strikes[option_id];
double T = maturities[option_id];
int n = num_termss[option_id];
double a = reversion_rates[option_id];
double sigma = volatilities[option_id];
double dt = T / ((double) n);
double V = sigma * sigma * (1 - (exp(0.0 - 2.0 * a * dt)) ) / (2.0 * a);
double dr = sqrt((1.0 + 2.0) * V);
double M = exp(0.0 - a * dt) - 1.0;
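  // jmax rule: Hull and White suggest the smallest integer greater than 0.184/(a*dt);
  // since M = exp(-a*dt) - 1 is approximately -a*dt, -0.184/M approximates that bound.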
double jmax = ((int)(-0.184 / M)) + 1;
int m = jmax + 2;
if(lid < sum_of_qlens_in_block) { // maybe some guard here
// 1. forward iteration
for(int i=0; i<n_max; i++) {
if() { // guard because of __synthreads
}
// barrier because of dependency between q_{i} and q_{i+1}
__syncthreads();
}
// 2. backward iteration
for(int i=0; i<n_max; i++) {
if() { // guard because of __synthreads
}
// barrier because of dependency between c_{i-1} and c_{i}
__syncthreads();
}
} // END: lid < sum_of_qlens_in_block
}
int main()
{
// small.in - should be read from file
  double strike[1] = { 0.7965300572556244 }; // long double?
  double maturity[1] = { 9.0000 };
  int num_terms[1] = { 108 };
  double reversion_rate[1] = { 0.1000 };
  double volatility[1] = { 0.0100 };
// set maximum chunk size
int w = 256;
// start out with: (assuming that all options are equal)
// n_max := options[0].n
// m_max := options[0].m
// compute: chunks
  // each thread should know where to read from when data is
// in global device memory
// copy data from host to device
// compute block and grid dimensions
// - block: (1, 1, w)
// - grid: (1, ceil(sum(Qlen) / w)
// execute kernel
// copy data from device to host
return 0;
}
|
f84496785de2caf8345e6f9ed8cac6e3b6c5c767.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
** Reduce noise by using Mean Filter with equal weights
*/
__global__ void MeanFilter ( uint *dst, int imageW, int imageH, int radius, float brightness, float contrast)
{
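    // One thread per output pixel: average a (2*radius+1) x (2*radius+1) window sampled from
    // the texture, applying a power-law contrast and a linear brightness adjustment to each
    // sample before accumulation.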
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if(ix < imageW && iy < imageH){
float3 sum = {0,0,0};
float4 fresult = {0,0,0,0};
float count = 0.f;
for( float i = -radius; i <= radius; i++)
for( float j = -radius; j <= radius; j++)
{
fresult = tex2D(texImage, x + j, y + i);
//adjust contrast
float red = pow(fresult.x, contrast);
float green = pow(fresult.y, contrast);
float blue = pow(fresult.z, contrast);
//adjust brightness
red = red * (1.f - brightness) + brightness;
green = green * (1.f - brightness) + brightness;
blue = blue * (1.f - brightness) + brightness;
sum.x += red;
sum.y += green;
sum.z += blue;
count += 1.f;
}
sum.x /= count;
sum.y /= count;
sum.z /= count;
//Write final result to global memory
dst[imageW * iy + ix] = make_color(sum.x, sum.y, sum.z, 0);
}
}
extern "C" void meanFilterWrapper (uint *dst, int imageW, int imageH, int radius, float brightness, float contrast)
{
//for more effective kernel execution
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
hipLaunchKernelGGL(( MeanFilter), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, radius, brightness, contrast);
} | f84496785de2caf8345e6f9ed8cac6e3b6c5c767.cu | /*
** Reduce noise by using Mean Filter with equal weights
*/
__global__ void MeanFilter ( uint *dst, int imageW, int imageH, int radius, float brightness, float contrast)
{
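    // One thread per output pixel: average a (2*radius+1) x (2*radius+1) window sampled from
    // the texture, applying a power-law contrast and a linear brightness adjustment to each
    // sample before accumulation.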
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
//Add half of a texel to always address exact texel centers
const float x = (float)ix + 0.5f;
const float y = (float)iy + 0.5f;
if(ix < imageW && iy < imageH){
float3 sum = {0,0,0};
float4 fresult = {0,0,0,0};
float count = 0.f;
for( float i = -radius; i <= radius; i++)
for( float j = -radius; j <= radius; j++)
{
fresult = tex2D(texImage, x + j, y + i);
//adjust contrast
float red = pow(fresult.x, contrast);
float green = pow(fresult.y, contrast);
float blue = pow(fresult.z, contrast);
//adjust brightness
red = red * (1.f - brightness) + brightness;
green = green * (1.f - brightness) + brightness;
blue = blue * (1.f - brightness) + brightness;
sum.x += red;
sum.y += green;
sum.z += blue;
count += 1.f;
}
sum.x /= count;
sum.y /= count;
sum.z /= count;
//Write final result to global memory
dst[imageW * iy + ix] = make_color(sum.x, sum.y, sum.z, 0);
}
}
extern "C" void meanFilterWrapper (uint *dst, int imageW, int imageH, int radius, float brightness, float contrast)
{
//for more effective kernel execution
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
MeanFilter<<<grid, threads>>>(dst, imageW, imageH, radius, brightness, contrast);
} |
c3df78c3a04a4df422a0f6ca5b97a57457979022.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/core/per_stream_pool.h" // NOLINT
#include <gtest/gtest.h>
#include <atomic>
#include <algorithm>
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>
#include "dali/core/cuda_stream.h"
namespace dali {
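// Host callback enqueued with hipLaunchHostFunc: spins until the test clears the flag,
// keeping the stream artificially busy so pending-work behavior of the pool can be exercised.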
void wait_func(void *pvflag) {
auto *flag = reinterpret_cast<std::atomic_flag*>(pvflag);
while (flag->test_and_set()) {
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
}
TEST(PerStreamPool, SingleStream) {
std::atomic_flag flag;
flag.test_and_set();
HIPStreamMasqueradingAsCUDA s1 = HIPStreamMasqueradingAsCUDA::Create(true);
HIPStreamMasqueradingAsCUDA ssync = HIPStreamMasqueradingAsCUDA::Create(true);
CUDAEvent e = CUDAEvent::Create();
PerStreamPool<int> pool;
int *p1 = nullptr, *p2 = nullptr;
if (auto lease = pool.Get(s1)) {
p1 = lease;
}
hipLaunchHostFunc(ssync, wait_func, &flag);
hipEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
hipStreamSynchronize(s1); // make sure that stream has completed its job
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object.";
hipStreamWaitEvent(s1, e, 0); // block s1
}
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object, even if job is still pending.";
}
flag.clear(); // unblock stream ssync
hipStreamSynchronize(ssync);
hipStreamSynchronize(s1);
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object.";
}
}
TEST(PerDevicePool, SingleStreamNoReuse) {
std::atomic_flag flag;
flag.test_and_set();
HIPStreamMasqueradingAsCUDA s1 = HIPStreamMasqueradingAsCUDA::Create(true);
HIPStreamMasqueradingAsCUDA ssync = HIPStreamMasqueradingAsCUDA::Create(true);
CUDAEvent e = CUDAEvent::Create();
PerDevicePool<int> pool;
int *p1 = nullptr, *p2 = nullptr, *p3 = nullptr;
if (auto lease = pool.Get(s1)) {
p1 = lease;
}
hipLaunchHostFunc(ssync, wait_func, &flag);
hipEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
hipStreamSynchronize(s1); // make sure that stream has completed its job
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object.";
hipStreamWaitEvent(s1, e, 0); // block s1
}
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_NE(p2, p1) << "Expected to get a new object - job is still pending and reuse disabled.";
}
flag.clear(); // unblock stream ssync
hipStreamSynchronize(ssync);
hipStreamSynchronize(s1);
if (auto lease = pool.Get(s1)) {
p3 = lease;
EXPECT_TRUE(p3 == p1 || p3 == p2) << "Expected to get one of the previous objects.";
}
}
TEST(PerStreamPool, MultiStream) {
std::atomic_flag flag;
flag.test_and_set();
HIPStreamMasqueradingAsCUDA s1 = HIPStreamMasqueradingAsCUDA::Create(true);
HIPStreamMasqueradingAsCUDA s2 = HIPStreamMasqueradingAsCUDA::Create(true);
HIPStreamMasqueradingAsCUDA ssync = HIPStreamMasqueradingAsCUDA::Create(true);
CUDAEvent e = CUDAEvent::Create();
PerStreamPool<int> pool;
int *p1 = nullptr, *p2 = nullptr, *p3 = nullptr, *p4 = nullptr;
if (auto lease = pool.Get(s1)) {
p1 = lease;
}
hipLaunchHostFunc(ssync, wait_func, &flag);
hipEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
hipStreamSynchronize(s1); // make sure that stream has completed its job
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object.";
hipStreamWaitEvent(s1, e, 0); // block s1
}
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object, even if job is still pending.";
}
if (auto lease = pool.Get(s2)) {
p3 = lease;
EXPECT_NE(p3, p1) << "Expected to get a new object, job on s1 is still pending.";
}
flag.clear(); // unblock stream ssync
hipStreamSynchronize(ssync);
hipStreamSynchronize(s1);
hipStreamSynchronize(s2);
if (auto lease = pool.Get(s1)) {
p3 = lease;
    EXPECT_TRUE(p3 == p1 || p3 == p2) << "Expected to get one of the previous objects.";
}
hipLaunchHostFunc(ssync, wait_func, &flag);
hipEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
if (auto lease = pool.Get(s1)) {
p4 = lease;
    EXPECT_TRUE(p4 == p3) << "Expected to get one of the previous objects.";
hipStreamWaitEvent(s1, e, 0); // block s1
}
if (auto lease = pool.Get(s2)) {
p4 = lease;
EXPECT_TRUE(p4 != p3) << "Expected to get a different object.";
}
flag.clear();
hipStreamSynchronize(ssync);
}
TEST(PerStreamPool, Massive) {
std::atomic_flag flag;
flag.test_and_set();
int N = 100;
int niter = 10;
std::vector<HIPStreamMasqueradingAsCUDA> s(N);
std::vector<int *> p1(N), p2(N);
for (int i = 0; i < N; i++) {
s[i] = HIPStreamMasqueradingAsCUDA::Create(true);
}
HIPStreamMasqueradingAsCUDA ssync = HIPStreamMasqueradingAsCUDA::Create(true);
CUDAEvent e = CUDAEvent::Create();
PerStreamPool<int> pool;
hipLaunchHostFunc(ssync, wait_func, &flag);
hipEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
volatile bool failure = false;
std::vector<std::thread> t(N);
for (int i = 0; i < N; i++) {
t[i] = std::thread([&, i]() {
for (int j = 0; j < niter; j++) {
if (auto lease = pool.Get(s[i])) {
if (j == 0) {
p1[i] = lease;
hipStreamWaitEvent(s[i], e, 0); // block s[i]
} else {
if (lease != p1[i]) {
std::cerr << "Failure in worker thread " << i
<< ": object not reused on same stream.";
failure = true;
break;
}
}
}
}
});
}
for (int i = 0; i < N; i++)
t[i].join();
EXPECT_FALSE(failure) << "Failure in worker thread";
flag.clear();
hipStreamSynchronize(ssync);
std::sort(p1.begin(), p1.end());
for (int i = 1; i < N; i++)
EXPECT_NE(p1[i], p1[i-1]) << "Duplicate object found - this shouldn't have happened";
for (int i = 0; i < N; i++)
hipStreamSynchronize(s[i]);
hipLaunchHostFunc(ssync, wait_func, &flag);
hipEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
for (int i = 0; i < N; i++) {
auto lease = pool.Get(s[i]);
hipStreamWaitEvent(s[i], e, 0); // block s[i]
p2[i] = lease;
}
flag.clear();
hipStreamSynchronize(ssync);
std::sort(p2.begin(), p2.end());
EXPECT_EQ(p1, p2) << "Should reuse all objects";
}
} // namespace dali
| c3df78c3a04a4df422a0f6ca5b97a57457979022.cu | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/core/per_stream_pool.h" // NOLINT
#include <gtest/gtest.h>
#include <atomic>
#include <algorithm>
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>
#include "dali/core/cuda_stream.h"
namespace dali {
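// Host callback enqueued with cudaLaunchHostFunc: spins until the test clears the flag,
// keeping the stream artificially busy so pending-work behavior of the pool can be exercised.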
void wait_func(void *pvflag) {
auto *flag = reinterpret_cast<std::atomic_flag*>(pvflag);
while (flag->test_and_set()) {
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
}
TEST(PerStreamPool, SingleStream) {
std::atomic_flag flag;
flag.test_and_set();
CUDAStream s1 = CUDAStream::Create(true);
CUDAStream ssync = CUDAStream::Create(true);
CUDAEvent e = CUDAEvent::Create();
PerStreamPool<int> pool;
int *p1 = nullptr, *p2 = nullptr;
if (auto lease = pool.Get(s1)) {
p1 = lease;
}
cudaLaunchHostFunc(ssync, wait_func, &flag);
cudaEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
cudaStreamSynchronize(s1); // make sure that stream has completed its job
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object.";
cudaStreamWaitEvent(s1, e, 0); // block s1
}
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object, even if job is still pending.";
}
flag.clear(); // unblock stream ssync
cudaStreamSynchronize(ssync);
cudaStreamSynchronize(s1);
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object.";
}
}
TEST(PerDevicePool, SingleStreamNoReuse) {
std::atomic_flag flag;
flag.test_and_set();
CUDAStream s1 = CUDAStream::Create(true);
CUDAStream ssync = CUDAStream::Create(true);
CUDAEvent e = CUDAEvent::Create();
PerDevicePool<int> pool;
int *p1 = nullptr, *p2 = nullptr, *p3 = nullptr;
if (auto lease = pool.Get(s1)) {
p1 = lease;
}
cudaLaunchHostFunc(ssync, wait_func, &flag);
cudaEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
cudaStreamSynchronize(s1); // make sure that stream has completed its job
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object.";
cudaStreamWaitEvent(s1, e, 0); // block s1
}
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_NE(p2, p1) << "Expected to get a new object - job is still pending and reuse disabled.";
}
flag.clear(); // unblock stream ssync
cudaStreamSynchronize(ssync);
cudaStreamSynchronize(s1);
if (auto lease = pool.Get(s1)) {
p3 = lease;
EXPECT_TRUE(p3 == p1 || p3 == p2) << "Expected to get one of the previous objects.";
}
}
TEST(PerStreamPool, MultiStream) {
std::atomic_flag flag;
flag.test_and_set();
CUDAStream s1 = CUDAStream::Create(true);
CUDAStream s2 = CUDAStream::Create(true);
CUDAStream ssync = CUDAStream::Create(true);
CUDAEvent e = CUDAEvent::Create();
PerStreamPool<int> pool;
int *p1 = nullptr, *p2 = nullptr, *p3 = nullptr, *p4 = nullptr;
if (auto lease = pool.Get(s1)) {
p1 = lease;
}
cudaLaunchHostFunc(ssync, wait_func, &flag);
cudaEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
cudaStreamSynchronize(s1); // make sure that stream has completed its job
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object.";
cudaStreamWaitEvent(s1, e, 0); // block s1
}
if (auto lease = pool.Get(s1)) {
p2 = lease;
EXPECT_EQ(p2, p1) << "Expected to get the same object, even if job is still pending.";
}
if (auto lease = pool.Get(s2)) {
p3 = lease;
EXPECT_NE(p3, p1) << "Expected to get a new object, job on s1 is still pending.";
}
flag.clear(); // unblock stream ssync
cudaStreamSynchronize(ssync);
cudaStreamSynchronize(s1);
cudaStreamSynchronize(s2);
if (auto lease = pool.Get(s1)) {
p3 = lease;
    EXPECT_TRUE(p3 == p1 || p3 == p2) << "Expected to get one of the previous objects.";
}
cudaLaunchHostFunc(ssync, wait_func, &flag);
cudaEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
if (auto lease = pool.Get(s1)) {
p4 = lease;
    EXPECT_TRUE(p4 == p3) << "Expected to get one of the previous objects.";
cudaStreamWaitEvent(s1, e, 0); // block s1
}
if (auto lease = pool.Get(s2)) {
p4 = lease;
EXPECT_TRUE(p4 != p3) << "Expected to get a different object.";
}
flag.clear();
cudaStreamSynchronize(ssync);
}
TEST(PerStreamPool, Massive) {
std::atomic_flag flag;
flag.test_and_set();
int N = 100;
int niter = 10;
std::vector<CUDAStream> s(N);
std::vector<int *> p1(N), p2(N);
for (int i = 0; i < N; i++) {
s[i] = CUDAStream::Create(true);
}
CUDAStream ssync = CUDAStream::Create(true);
CUDAEvent e = CUDAEvent::Create();
PerStreamPool<int> pool;
cudaLaunchHostFunc(ssync, wait_func, &flag);
cudaEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
volatile bool failure = false;
std::vector<std::thread> t(N);
for (int i = 0; i < N; i++) {
t[i] = std::thread([&, i]() {
for (int j = 0; j < niter; j++) {
if (auto lease = pool.Get(s[i])) {
if (j == 0) {
p1[i] = lease;
cudaStreamWaitEvent(s[i], e, 0); // block s[i]
} else {
if (lease != p1[i]) {
std::cerr << "Failure in worker thread " << i
<< ": object not reused on same stream.";
failure = true;
break;
}
}
}
}
});
}
for (int i = 0; i < N; i++)
t[i].join();
EXPECT_FALSE(failure) << "Failure in worker thread";
flag.clear();
cudaStreamSynchronize(ssync);
std::sort(p1.begin(), p1.end());
for (int i = 1; i < N; i++)
EXPECT_NE(p1[i], p1[i-1]) << "Duplicate object found - this shouldn't have happened";
for (int i = 0; i < N; i++)
cudaStreamSynchronize(s[i]);
cudaLaunchHostFunc(ssync, wait_func, &flag);
cudaEventRecord(e, ssync); // this event is recorded, but not reached, because this stream is
// waiting for a spinning host function
for (int i = 0; i < N; i++) {
auto lease = pool.Get(s[i]);
cudaStreamWaitEvent(s[i], e, 0); // block s[i]
p2[i] = lease;
}
flag.clear();
cudaStreamSynchronize(ssync);
std::sort(p2.begin(), p2.end());
EXPECT_EQ(p1, p2) << "Should reuse all objects";
}
} // namespace dali
|
d9e13b8956a97ee781858dba5b601d77b83a5bcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define MAX_NUM_BLOCKS 70
//#include "global_sync.cu"
#define WARP_SIZE 32
#define NUM_THREADS 512
#define NUM_WARPS (NUM_THREADS / WARP_SIZE)
#define LOG_NUM_THREADS 9
#define LOG_NUM_WARPS (LOG_NUM_THREADS - 5)
#define SCAN_STRIDE (WARP_SIZE + WARP_SIZE / 2 + 1)
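// Per-block double-buffered work queues: each block dequeues work units from curInQueue and
// appends newly generated work to curOutQueue; when the input side is exhausted, swapQueues()
// flips the two buffers and processing continues until no new work remains.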
__device__ volatile int inQueueSize[MAX_NUM_BLOCKS];
__device__ volatile int *inQueuePtr1[MAX_NUM_BLOCKS];
__device__ volatile int inQueueHead[MAX_NUM_BLOCKS];
__device__ volatile int outQueueMaxSize[MAX_NUM_BLOCKS];
__device__ volatile int outQueueHead[MAX_NUM_BLOCKS];
__device__ volatile int *outQueuePtr2[MAX_NUM_BLOCKS];
__device__ volatile int *curInQueue[MAX_NUM_BLOCKS];
__device__ volatile int *curOutQueue[MAX_NUM_BLOCKS];
__device__ volatile int execution_code;
// These variables are used for debugging purposes only
__device__ volatile int totalInserts[MAX_NUM_BLOCKS];
// Utils...
// http://www.moderngpu.com/intro/scan.html
__device__ void scan(const int* values, int* exclusive) {
// Reserve a half warp of extra space plus one per warp in the block.
// This is exactly enough space to avoid comparisons in the multiscan
// and to avoid bank conflicts.
__shared__ volatile int scan[NUM_WARPS * SCAN_STRIDE];
int tid = threadIdx.x;
int warp = tid / WARP_SIZE;
int lane = (WARP_SIZE - 1) & tid;
volatile int* s = scan + SCAN_STRIDE * warp + lane + WARP_SIZE / 2;
s[-16] = 0;
// Read from global memory.
int x = values[tid];
s[0] = x;
// Run inclusive scan on each warp's data.
int sum = x;
#pragma unroll
for(int i = 0; i < 5; ++i) {
int offset = 1<< i;
sum += s[-offset];
s[0] = sum;
}
// Synchronize to make all the totals available to the reduction code.
__syncthreads();
__shared__ volatile int totals[NUM_WARPS + NUM_WARPS / 2];
if(tid < NUM_WARPS) {
// Grab the block total for the tid'th block. This is the last element
// in the block's scanned sequence. This operation avoids bank
// conflicts.
int total = scan[SCAN_STRIDE * tid + WARP_SIZE / 2 + WARP_SIZE - 1];
totals[tid] = 0;
volatile int* s2 = totals + NUM_WARPS / 2 + tid;
int totalsSum = total;
s2[0] = total;
#pragma unroll
for(int i = 0; i < LOG_NUM_WARPS; ++i) {
int offset = 1<< i;
totalsSum += s2[-offset];
s2[0] = totalsSum;
}
// Subtract total from totalsSum for an exclusive scan.
totals[tid] = totalsSum - total;
}
// Synchronize to make the block scan available to all warps.
__syncthreads();
// Add the block scan to the inclusive sum for the block.
sum += totals[warp];
// Write the inclusive and exclusive scans to global memory.
// inclusive[tid] = sum;
exclusive[tid] = sum - x;
}
__device__ int queueElement(int *outQueueCurPtr, int *elements){
int queue_index = atomicAdd((int*)&outQueueHead[blockIdx.x], 1);
if(queue_index < outQueueMaxSize[blockIdx.x]){
curOutQueue[blockIdx.x][queue_index] = elements[0];
}else{
queue_index = -1;
}
return queue_index;
}
// Assuming that all threads in a block are calling this function
__device__ int queueElement(int *elements){
int queue_index = -1;
#ifdef PREFIX_SUM
__shared__ int writeAddr[NUM_THREADS];
__shared__ int exclusiveScan[NUM_THREADS];
__shared__ int global_queue_index;
if(threadIdx.x == 0){
global_queue_index = outQueueHead[blockIdx.x];
}
// set to the number of values this threard is writing
writeAddr[threadIdx.x] = elements[0];
// run a prefix-sum on threads inserting data to the queue
scan(writeAddr, exclusiveScan);
// calculate index into the queue where given thread is writing
queue_index = global_queue_index+exclusiveScan[threadIdx.x];
// write elemets sequentially to shared memory
// int localIndex = exclusiveScan[threadIdx.x];
// for(int i = 0; i < elements[0]; i++){
// localElements[localIndex+i] = elements[i+1];
// }
// __syncthreads();
// for(int i = threadIdx.x; i < exclusiveScan[NUM_THREADS-1]+writeAddr[NUM_THREADS-1]; i+=blockDim.x){
// curOutQueue[blockIdx.x][global_queue_index+i] = localElements[i];
// }
for(int i = 0; i < elements[0]; i++){
// If the queue storage has been exceed, than set the execution code to 1.
// This will force a second round in the morphological reconstructio.
if(queue_index+i >= outQueueMaxSize[blockIdx.x]){
// printf("List out of bounds\n");
execution_code=1;
}else{
curOutQueue[blockIdx.x][queue_index+i] = elements[i+1];
}
}
// thread 0 updates head of the queue
if(threadIdx.x == 0){
outQueueHead[blockIdx.x]+=exclusiveScan[NUM_THREADS-1]+writeAddr[NUM_THREADS-1];
if(outQueueHead[blockIdx.x] >= outQueueMaxSize[blockIdx.x]){
outQueueHead[blockIdx.x] = outQueueMaxSize[blockIdx.x];
}
// printf("Inserting = %d - outQueueHead = %d\n", exclusiveScan[NUM_THREADS-1]+writeAddr[NUM_THREADS-1], outQueueHead[blockIdx.x]);
}
#else
if(elements[0] != 0){
queue_index = atomicAdd((int*)&outQueueHead[blockIdx.x], elements[0]);
if(queue_index < outQueueMaxSize[blockIdx.x]){
for(int i = 0; i < elements[0];i++){
curOutQueue[blockIdx.x][queue_index+i] = elements[i+1];
}
}else{
queue_index = -1;
}
}
#endif
return queue_index;
}
// Assuming that all threads in a block are calling this function
__device__ int queueElement(int element){
int queue_index = -1;
#ifdef PREFIX_SUM
__shared__ int writeAddr[NUM_THREADS];
__shared__ int exclusiveScan[NUM_THREADS];
__shared__ int global_queue_index;
if(threadIdx.x == 0){
global_queue_index = outQueueHead[blockIdx.x];
}
// set to 1 threards that are writing
writeAddr[threadIdx.x] = ((element) != (-1) ? (1):(0));
// run a prefix-sum on threads inserting data to the queue
scan(writeAddr, exclusiveScan);
// calculate index into the queue where give thread is writing
queue_index = global_queue_index+exclusiveScan[threadIdx.x];
// If there is data to be queued, do it
if(element != -1){
curOutQueue[blockIdx.x][queue_index] = element;
}
// thread 0 updates head of the queue
if(threadIdx.x == 0){
outQueueHead[blockIdx.x]+=exclusiveScan[NUM_THREADS-1]+writeAddr[NUM_THREADS-1];
}
#else
if(element != -1){
queue_index = atomicAdd((int*)&outQueueHead[blockIdx.x], 1);
if(queue_index < outQueueMaxSize[blockIdx.x]){
curOutQueue[blockIdx.x][queue_index] = element;
}else{
queue_index = -1;
}
}
#endif
return queue_index;
}
// Makes queue 1 point to queue 2, and vice-versa
__device__ void swapQueues(int loopIt){
__syncthreads();
if(loopIt %2 == 0){
curInQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
curOutQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
if(threadIdx.x == 0){
inQueueSize[blockIdx.x] = outQueueHead[blockIdx.x];
outQueueHead[blockIdx.x] = 0;
inQueueHead[blockIdx.x] = 0;
// This is used for profiling only
totalInserts[blockIdx.x]+=inQueueSize[blockIdx.x];
}
}else{
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
if(threadIdx.x == 0){
inQueueSize[blockIdx.x] = outQueueHead[blockIdx.x];
outQueueHead[blockIdx.x] = 0;
inQueueHead[blockIdx.x] = 0;
// This is used for profiling only
totalInserts[blockIdx.x]+=inQueueSize[blockIdx.x];
}
}
__syncthreads();
}
// -2, nothing else to be done at all
__device__ int dequeueElement(int *loopIt){
// did this block got something to do?
__shared__ volatile int gotWork;
getWork:
gotWork = 0;
// Try to get some work.
// int queue_index = atomicAdd((int*)&inQueueHead, 1);
int queue_index = inQueueHead[blockIdx.x] + threadIdx.x;
// I must guarantee that idle threads are set to 0, and no other thread
// will come later and set it to 0 again
__syncthreads();
if(threadIdx.x == 0){
inQueueHead[blockIdx.x]+=blockDim.x;
// if(loopIt[0] < 1){
// printf("inQueueSize = %d loopIt[0] = %d queue_index = %d outQueueHead = %d\n", inQueueSize[blockIdx.x], loopIt[0], queue_index, outQueueHead[blockIdx.x]);
// }
}
// Nothing to do by default
int element = -1;
if(queue_index < inQueueSize[blockIdx.x]){
element = curInQueue[blockIdx.x][queue_index];
gotWork = 1;
}
__syncthreads();
// This block does not have anything to process
if(!gotWork){
// if(loopIt[0] < 20 && threadIdx.x == 0)
// printf("inQueueSize = %d loopIt[0] = %d\n", inQueueSize[blockIdx.x], loopIt[0]);
element = -2;
if(outQueueHead[blockIdx.x] != 0){
swapQueues(loopIt[0]);
loopIt[0]++;
goto getWork;
}
}
return element;
}
// Initialized queue data structures:
// Initial assumptions: this first kernel should be launched with number of threads at least equal
// to the number of block used with the second kernel
// inQueueData ptr size is same as outQueueMaxSize provided.
__global__ void initQueue(int *inQueueData, int dataElements, int *outQueueData, int outMaxSize){
if(threadIdx.x < 1){
// Simply assign input data pointers/number of elements to the queue
inQueuePtr1[threadIdx.x] = inQueueData;
// printf("initQueueVector: tid - %d dataElements = %d pointer = %p\n", threadIdx.x, dataElements, inQueueData);
inQueueSize[threadIdx.x] = dataElements;
totalInserts[threadIdx.x] = 0;
// alloc second vector used to queue output elements
outQueuePtr2[threadIdx.x] = outQueueData;
// Maximum number of elements that fit into the queue
outQueueMaxSize[threadIdx.x] = outMaxSize;
// Head of the out queue
outQueueHead[threadIdx.x] = 0;
// Head of the in queue
inQueueHead[threadIdx.x] = 0;
}
}
__global__ void initQueueId(int *inQueueData, int dataElements, int *outQueueData, int outMaxSize, int qId){
if(threadIdx.x < 1){
// Simply assign input data pointers/number of elements to the queue
inQueuePtr1[qId] = inQueueData;
// printf("initQueueVector: tid - %d dataElements = %d pointer = %p\n", threadIdx.x, dataElements, inQueueData);
inQueueSize[qId] = dataElements;
totalInserts[qId] = 0;
// alloc second vector used to queue output elements
outQueuePtr2[qId] = outQueueData;
// Maximum number of elements that fit into the queue
outQueueMaxSize[qId] = outMaxSize;
// Head of the out queue
outQueueHead[qId] = 0;
// Head of the in queue
inQueueHead[qId] = 0;
execution_code=0;
}
}
__global__ void initQueueVector(int **inQueueData, int *inQueueSizes, int **outQueueData, int numImages){
if(threadIdx.x < MAX_NUM_BLOCKS && threadIdx.x < numImages){
// printf("initQueueVector: tid - %d inQueueSize[%d] = %d pointer = %p outPtr = %p\n", threadIdx.x, threadIdx.x, inQueueSizes[threadIdx.x], inQueueData[threadIdx.x], outQueueData[threadIdx.x]);
// Simply assign input data pointers/number of elements to the queue
inQueuePtr1[threadIdx.x] = inQueueData[threadIdx.x];
inQueueSize[threadIdx.x] = inQueueSizes[threadIdx.x];
totalInserts[threadIdx.x] = 0;
// alloc second vector used to queue output elements
outQueuePtr2[threadIdx.x] = outQueueData[threadIdx.x];
// Maximum number of elements that fit into the queue
outQueueMaxSize[threadIdx.x] = (inQueueSizes[threadIdx.x]+1000) * 2;
// Head of the out queue
outQueueHead[threadIdx.x] = 0;
// Head of the in queue
inQueueHead[threadIdx.x] = 0;
}
}
// Returns what should be queued
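// Morphological reconstruction flooding step: if the marker (seeds) value at (x, y) is below
// pval and differs from the mask (image) value, raise it to min(pval, image) via atomicMax
// and return the pixel index so it gets re-queued; otherwise return -1.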
__device__ int propagate(int *seeds, unsigned char *image, int x, int y, int ncols, unsigned char pval){
int returnValue = -1;
int index = y*ncols + x;
unsigned char seedXYval = seeds[index];
unsigned char imageXYval = image[index];
if((seedXYval < pval) && (imageXYval != seedXYval)){
unsigned char newValue = min(pval, imageXYval);
// this should be a max atomic...
atomicMax(&(seeds[index]), newValue);
returnValue = index;
}
return returnValue;
}
__global__ void listReduceKernel(int* d_Result, int *seeds, unsigned char *image, int ncols, int nrows){
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
int loopIt = 0;
int workUnit = -1;
int tid = threadIdx.x;
__shared__ int localQueue[NUM_THREADS][5];
do{
int x, y;
localQueue[tid][0] = 0;
// Try to get some work.
workUnit = dequeueElement(&loopIt);
y = workUnit/ncols;
x = workUnit%ncols;
unsigned char pval = 0;
if(workUnit >= 0){
pval = seeds[workUnit];
}
int retWork = -1;
if(workUnit > 0){
retWork = propagate((int*)seeds, image, x, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit > 0){
retWork = propagate((int*)seeds, image, x, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit > 0){
retWork = propagate((int*)seeds, image, x-1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit > 0){
retWork = propagate((int*)seeds, image, x+1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
queueElement(localQueue[tid]);
}while(workUnit != -2);
d_Result[0]=totalInserts[blockIdx.x];
}
extern "C" int listComputation(int *h_Data, int dataElements, int *d_seeds, unsigned char *d_image, int ncols, int nrows){
	// seeds contains the marker and it is also the output image
// uint threadsX = 512;
int blockNum = 1;
int *d_Result;
int *d_Data;
unsigned int dataSize = dataElements * sizeof(int);
hipMalloc((void **)&d_Data, dataSize );
hipMemcpy(d_Data, h_Data, dataSize, hipMemcpyHostToDevice);
// alloc space to save output elements in the queue
int *d_OutVector;
hipMalloc((void **)&d_OutVector, sizeof(int) * dataElements);
// printf("Init queue data!\n");
// init values of the __global__ variables used by the queue
hipLaunchKernelGGL(( initQueue), dim3(1), dim3(1), 0, 0, d_Data, dataElements, d_OutVector, dataElements);
// init_sync<<<1, 1>>>();
hipMalloc((void **)&d_Result, sizeof(int) ) ;
hipMemset((void *)d_Result, 0, sizeof(int));
// printf("Run computation kernel!\n");
hipLaunchKernelGGL(( listReduceKernel), dim3(blockNum), dim3(NUM_THREADS), 0, 0, d_Result, d_seeds, d_image, ncols, nrows);
// cutilCheckMsg("histogramKernel() execution failed\n");
int h_Result;
hipMemcpy(&h_Result, d_Result, sizeof(int), hipMemcpyDeviceToHost);
printf(" #queue entries = %d\n",h_Result);
hipFree(d_Data);
hipFree(d_Result);
hipFree(d_OutVector);
// TODO: free everyone
return h_Result;
}
__global__ void morphReconKernel(int* d_Result, int *seeds, unsigned char *image, int ncols, int nrows){
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
int loopIt = 0;
int workUnit = -1;
int tid = threadIdx.x;
__shared__ int localQueue[NUM_THREADS][5];
// printf("inQueueSize = %d\n",inQueueSize[blockIdx.x]);
__syncthreads();
do{
int x, y;
localQueue[tid][0] = 0;
// Try to get some work.
workUnit = dequeueElement(&loopIt);
y = workUnit/ncols;
x = workUnit%ncols;
unsigned char pval = 0;
if(workUnit >=0){
pval = seeds[workUnit];
}
int retWork = -1;
if(workUnit >= 0 && y > 0){
retWork = propagate((int*)seeds, image, x, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && y < nrows-1){
retWork = propagate((int*)seeds, image, x, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x > 0){
retWork = propagate((int*)seeds, image, x-1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
queueElement(localQueue[tid]);
}while(workUnit != -2);
d_Result[0]=totalInserts[blockIdx.x];
}
__global__ void morphReconKernelVector(int* d_Result, int **d_SeedsList, unsigned char **d_ImageList, int *d_ncols, int *d_nrows, int connectivity=4){
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
// if(threadIdx.x == 0){
// printf("inqueue = %p outqueue = %p ncols = %d nrows = %d connectivity=%d\n", inQueuePtr1[blockIdx.x], outQueuePtr2[blockIdx.x], d_ncols[blockIdx.x], d_nrows[blockIdx.x], connectivity);
// }
int *seeds = d_SeedsList[blockIdx.x];
unsigned char *image = d_ImageList[blockIdx.x];
int ncols = d_ncols[blockIdx.x];
int nrows = d_nrows[blockIdx.x];
int loopIt = 0;
int workUnit = -1;
int tid = threadIdx.x;
__shared__ int localQueue[NUM_THREADS][9];
__syncthreads();
do{
int x, y;
localQueue[tid][0] = 0;
// Try to get some work.
workUnit = dequeueElement(&loopIt);
y = workUnit/ncols;
x = workUnit%ncols;
unsigned char pval = 0;
if(workUnit >= 0){
pval = seeds[workUnit];
}
int retWork = -1;
if(workUnit >= 0 && y > 0){
retWork = propagate((int*)seeds, image, x, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && y < nrows-1){
retWork = propagate((int*)seeds, image, x, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x > 0){
retWork = propagate((int*)seeds, image, x-1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// if connectivity is 8, four other neighbors have to be verified
if(connectivity == 8){
if(workUnit >= 0 && y > 0 && x >0){
retWork = propagate((int*)seeds, image, x-1, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y > 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y < (nrows-1) && x >0){
retWork = propagate((int*)seeds, image, x-1, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y < (nrows-1) && x <(ncols-1)){
retWork = propagate((int*)seeds, image, x+1, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
}
queueElement(localQueue[tid]);
}while(workUnit != -2);
d_Result[blockIdx.x]=totalInserts[blockIdx.x];
}
/// This is an older implementation of this function. Presumably about 1ms faster, but considerably uglier.
///extern "C" int morphReconVector(int nImages, int **h_InputListPtr, int* h_ListSize, int **h_Seeds, unsigned char **h_Images, int* h_ncols, int* h_nrows){
///// seeds contais the maker and it is also the output image
/// int blockNum = nImages;
/// int *d_Result;
///
/// // alloc space to save output elements in the queue
/// int **h_OutQueuePtr = (int **)malloc(sizeof(int*) * nImages);;
///
/// for(int i = 0; i < nImages;i++){
/// hipMalloc((void **)&h_OutQueuePtr[i], sizeof(int) * (h_ListSize[i]+1000) * 2);
/// }
///
/// int **d_OutQueuePtr = NULL;
/// hipMalloc((void **)&d_OutQueuePtr, sizeof(int*) * nImages);
/// hipMemcpy(d_OutQueuePtr, h_OutQueuePtr, sizeof(int*) * nImages, hipMemcpyHostToDevice);
///
///
///
/// printf("nImages = %d\n", nImages);
///
/// int **d_InputListPtr = NULL;
/// hipMalloc((void **)&d_InputListPtr, sizeof(int*) * nImages);
/// hipMemcpy(d_InputListPtr, h_InputListPtr, sizeof(int*) * nImages, hipMemcpyHostToDevice);
///
///
/// int *d_ListSize = NULL;
/// hipMalloc((void **)&d_ListSize, sizeof(int) * nImages);
/// hipMemcpy(d_ListSize, h_ListSize, sizeof(int) * nImages, hipMemcpyHostToDevice);
///
/// // init values of the __global__ variables used by the queue
/// initQueueVector<<<1, nImages>>>(d_InputListPtr, d_ListSize, d_OutQueuePtr, nImages);
///
/// hipMalloc((void **)&d_Result, sizeof(int)*nImages) ;
/// hipMemset((void *)d_Result, 0, sizeof(int)*nImages);
///
/// int **d_Seeds = NULL;
/// hipMalloc((void **)&d_Seeds, sizeof(int*) * nImages);
/// hipMemcpy(d_Seeds, h_Seeds, sizeof(int*) * nImages, hipMemcpyHostToDevice);
///
/// unsigned char **d_Images = NULL;
/// hipMalloc((void **)&d_Images, sizeof(unsigned char*) * nImages);
/// hipMemcpy(d_Images, h_Images, sizeof(unsigned char*) * nImages, hipMemcpyHostToDevice);
///
/// int *d_ncols = NULL;
/// hipMalloc((void **)&d_ncols, sizeof(int) * nImages);
/// hipMemcpy(d_ncols, h_ncols, sizeof(int) * nImages, hipMemcpyHostToDevice);
///
/// int *d_nrows = NULL;
/// hipMalloc((void **)&d_nrows, sizeof(int) * nImages);
/// hipMemcpy(d_nrows, h_nrows, sizeof(int) * nImages, hipMemcpyHostToDevice);
///
/// printf("Run computation kernel!\n");
///hipLaunchKernelGGL(( morphReconKernelVector), dim3(blockNum), dim3(NUM_THREADS), 0, 0, d_Result, d_Seeds, d_Images, d_ncols, d_nrows);
///
/// hipError_t errorCode = hipGetLastError();
/// const char *error = hipGetErrorString(errorCode);
/// printf("Error after morphRecon = %s\n", error);
///
/// int h_Result;
/// hipMemcpy(&h_Result, d_Result, sizeof(int), hipMemcpyDeviceToHost);
///
/// printf(" #queue entries = %d\n",h_Result);
///
////// hipFree(d_nrows);
////// hipFree(d_ncols);
////// hipFree(d_Images);
////// hipFree(d_Seeds);
////// hipFree(d_InputListPtr);
////// hipFree(d_ListSize);
////// hipFree(d_Result);
////// hipFree(d_OutQueuePtr);
////// for(int i = 0; i < nImages; i++){
////// hipFree(h_OutQueuePtr[i]);
////// hipFree(h_InputListPtr[i]);
////// }
////// free(h_OutQueuePtr);
///
/// // TODO: free everyone
/// return h_Result;
///}
extern "C" int morphReconVector(int nImages, int **h_InputListPtr, int* h_ListSize, int **h_Seeds, unsigned char **h_Images, int* h_ncols, int* h_nrows, int connectivity){
	// seeds contains the marker and it is also the output image
int blockNum = nImages;
int *d_Result;
// alloc space to save output elements in the queue
int **h_OutQueuePtr = (int **)malloc(sizeof(int*) * nImages);;
for(int i = 0; i < nImages;i++){
hipMalloc((void **)&h_OutQueuePtr[i], sizeof(int) * (h_ListSize[i]+1000) * 2);
}
	// Init the queue for each image. Yes, this may not be the most efficient way, but the code is far easier to read.
	// Another version, where all pointers are copied at once to the GPU, was also built, but it was only about 1ms
	// faster. Thus, we decided to go with this version.
for(int i = 0; i < nImages;i++)
hipLaunchKernelGGL(( initQueueId), dim3(1), dim3(1), 0, 0, h_InputListPtr[i], h_ListSize[i], h_OutQueuePtr[i], (h_ListSize[i]+1000) *2, i);
hipMalloc((void **)&d_Result, sizeof(int)*nImages) ;
hipMemset((void *)d_Result, 0, sizeof(int)*nImages);
int **d_Seeds = NULL;
hipMalloc((void **)&d_Seeds, sizeof(int*) * nImages);
hipMemcpy(d_Seeds, h_Seeds, sizeof(int*) * nImages, hipMemcpyHostToDevice);
unsigned char **d_Images = NULL;
hipMalloc((void **)&d_Images, sizeof(unsigned char*) * nImages);
hipMemcpy(d_Images, h_Images, sizeof(unsigned char*) * nImages, hipMemcpyHostToDevice);
int *d_ncols = NULL;
hipMalloc((void **)&d_ncols, sizeof(int) * nImages);
hipMemcpy(d_ncols, h_ncols, sizeof(int) * nImages, hipMemcpyHostToDevice);
int *d_nrows = NULL;
hipMalloc((void **)&d_nrows, sizeof(int) * nImages);
hipMemcpy(d_nrows, h_nrows, sizeof(int) * nImages, hipMemcpyHostToDevice);
// printf("Run computation kernel!\n");
hipLaunchKernelGGL(( morphReconKernelVector), dim3(blockNum), dim3(NUM_THREADS), 0, 0, d_Result, d_Seeds, d_Images, d_ncols, d_nrows, connectivity);
	hipError_t errorCode = hipGetLastError();
	if(errorCode != hipSuccess){
		const char *error = hipGetErrorString(errorCode);
		printf("Error after morphRecon = %s\n", error);
	}
int *h_Result = (int *) malloc(sizeof(int) * blockNum);
hipMemcpy(h_Result, d_Result, sizeof(int) * blockNum, hipMemcpyDeviceToHost);
	int resultRet = h_Result[0];
// printf(" #queue entries = %d\n",h_Result[0]);
free(h_Result);
hipFree(d_nrows);
hipFree(d_ncols);
hipFree(d_Images);
hipFree(d_Seeds);
hipFree(d_Result);
for(int i = 0; i < nImages; i++){
hipFree(h_OutQueuePtr[i]);
hipFree(h_InputListPtr[i]);
}
free(h_OutQueuePtr);
	return resultRet;
}
__global__ void morphReconKernelSpeedup(int* d_Result, int *d_Seeds, unsigned char *d_Image, int ncols, int nrows, int connectivity=4){
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
int *seeds = d_Seeds;
unsigned char *image = d_Image;
// if(threadIdx.x == 0){
// printf("inqueue = %p outqueue = %p ncols = %d nrows = %d connectivity=%d\n", inQueuePtr1[blockIdx.x], outQueuePtr2[blockIdx.x], ncols, nrows, connectivity);
// }
// int *seeds = d_SeedsList[blockIdx.x];
// unsigned char *image = d_ImageList[blockIdx.x];
// int ncols = d_ncols[blockIdx.x];
// int nrows = d_nrows[blockIdx.x];
int loopIt = 0;
int workUnit = -1;
int tid = threadIdx.x;
__shared__ int localQueue[NUM_THREADS][9];
__syncthreads();
do{
int x, y;
localQueue[tid][0] = 0;
// Try to get some work.
workUnit = dequeueElement(&loopIt);
y = workUnit/ncols;
x = workUnit%ncols;
unsigned char pval = 0;
if(workUnit >= 0){
pval = seeds[workUnit];
}
int retWork = -1;
if(workUnit >= 0 && y > 0){
retWork = propagate((int*)seeds, image, x, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && y < nrows-1){
retWork = propagate((int*)seeds, image, x, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x > 0){
retWork = propagate((int*)seeds, image, x-1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// if connectivity is 8, four other neighbors have to be verified
if(connectivity == 8){
if(workUnit >= 0 && y > 0 && x >0){
retWork = propagate((int*)seeds, image, x-1, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y > 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y < (nrows-1) && x >0){
retWork = propagate((int*)seeds, image, x-1, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y < (nrows-1) && x <(ncols-1)){
retWork = propagate((int*)seeds, image, x+1, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
}
// queueElement(retWork);
queueElement(localQueue[tid]);
}while(workUnit != -2);
d_Result[blockIdx.x]=totalInserts[blockIdx.x];
if(execution_code!=0){
d_Result[gridDim.x]=1;
}
}
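// Each block of morphReconKernelSpeedup drains its own per-block queue over the shared
// seeds/image buffers. d_Result[blockIdx.x] reports how many queue insertions the block
// performed, and d_Result[gridDim.x] is set to 1 when any block overflowed its output
// queue (execution_code != 0), so the host can trigger another pass if needed.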
extern "C" int morphReconSpeedup( int *g_InputListPtr, int h_ListSize, int *g_Seed, unsigned char *g_Image, int h_ncols, int h_nrows, int connectivity, int nBlocks, float queue_increase_factor){
	// seeds contains the marker and it is also the output image
// int nImages = 1;
// TODO: change blockNum to nBlocks
// int nBlocks = nImages;
int *d_Result;
// int *d_return_code;
// float queue_increase_factor = 1.1;
// int nBlocks = 28;
// printf("nBlocks=%d\n",nBlocks);
// alloc space to save output elements in the queue for each block
int **h_OutQueuePtr = (int **)malloc(sizeof(int*) * nBlocks);
// at this moment I should partition the INPUT queue
// printf("List size = %d\n", h_ListSize);
int tempNblocks = nBlocks;
int subListsInit[tempNblocks];
// int subListsEnd[tempNblocks];
int subListsSize[tempNblocks];
for(int i = 0; i < tempNblocks; i++){
int curSubListInit = (h_ListSize/tempNblocks)*i;
int curSubListEnd = ((i+1<tempNblocks)?((i+1)*(h_ListSize/tempNblocks)-1):(h_ListSize-1));
// printf("BlockId = %d - init = %d end = %d size=%d\n", i, curSubListInit, curSubListEnd, curSubListEnd-curSubListInit+1);
subListsInit[i] = curSubListInit;
// subListsEnd[i] = curSubListEnd;
subListsSize[i] = curSubListEnd-curSubListInit+1;
}
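	// Note: integer division gives every block h_ListSize/tempNblocks elements, except the
	// last block, which also absorbs the remainder.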
// Adding code
// TODO: free data
int *blockSubLists[tempNblocks];
for(int i = 0; i < tempNblocks; i++){
hipMalloc((void **)&blockSubLists[i], sizeof(int)*(subListsSize[i]) * queue_increase_factor);
hipMemcpy(blockSubLists[i], &g_InputListPtr[subListsInit[i]], subListsSize[i] * sizeof(int), hipMemcpyDeviceToDevice);
}
// End adding code
// printf("h_listSize = %d subListsSize[0]=%d\n", h_ListSize, subListsSize[0]);
// cout << "h_listSize = "<< h_ListSize<< " subListsSize[0]="<< subListsSize[0] <<endl;
for(int i = 0; i < tempNblocks;i++){
hipMalloc((void **)&h_OutQueuePtr[i], sizeof(int) * (subListsSize[i]) * queue_increase_factor);
}
	// Init the queue for each block. Yes, this may not be the most efficient way, but the code is far easier to read.
	// Another version, where all pointers are copied to the GPU at once, was also built, but it was only about 1ms
	// faster. Thus, we decided to go with this version.
// for(int i = 0; i < nBlocks;i++)
// hipLaunchKernelGGL(( initQueueId), dim3(1), dim3(1), 0, 0, h_InputListPtr[i], h_ListSize[i], h_OutQueuePtr[i], (h_ListSize[i]+1000) *2, i);
for(int i = 0; i < nBlocks;i++)
hipLaunchKernelGGL(( initQueueId), dim3(1), dim3(1), 0, 0, blockSubLists[i], subListsSize[i], h_OutQueuePtr[i], (subListsSize[i]) *queue_increase_factor, i);
// hipLaunchKernelGGL(( initQueueId), dim3(1), dim3(1), 0, 0, g_InputListPtr, h_ListSize, h_OutQueuePtr[i], (h_ListSize+1000) *2, i);
// This is used by each block to store the number of queue operations performed
hipMalloc((void **)&d_Result, sizeof(int)*(nBlocks+1)) ;
hipMemset((void *)d_Result, 0, sizeof(int)*(nBlocks+1));
// printf("Run computation kernel!\n");
hipLaunchKernelGGL(( morphReconKernelSpeedup), dim3(nBlocks), dim3(NUM_THREADS), 0, 0, d_Result, g_Seed, g_Image, h_ncols, h_nrows, connectivity);
	// hipGetLastError() clears the error state, so capture it once and reuse it
	hipError_t errorCode = hipGetLastError();
	if(errorCode != hipSuccess){
		const char *error = hipGetErrorString(errorCode);
		printf("Error after morphRecon = %s\n", error);
	}
int *h_Result = (int *) malloc(sizeof(int) * (nBlocks+1));
hipMemcpy(h_Result, d_Result, sizeof(int) * (nBlocks+1), hipMemcpyDeviceToHost);
int resutRet = h_Result[nBlocks];
// for(int i = 0; i < nBlocks; i++){
// printf(" block# %d, #entries=%d\n", i, h_Result[i]);
// }
// printf(" Exec. Error code = %d\n", h_Result[nBlocks]);
free(h_Result);
hipFree(d_Result);
for(int i = 0; i < nBlocks; i++){
hipFree(h_OutQueuePtr[i]);
}
free(h_OutQueuePtr);
hipFree(g_InputListPtr);
return resutRet;
}
extern "C" int morphRecon(int *d_input_list, int dataElements, int *d_seeds, unsigned char *d_image, int ncols, int nrows){
	// seeds contains the marker and it is also the output image
int blockNum = 1;
int *d_Result;
// alloc space to save output elements in the queue
int *d_OutVector;
hipMalloc((void **)&d_OutVector, sizeof(int) * (dataElements+1000) * 2 );
// init values of the __global__ variables used by the queue
hipLaunchKernelGGL(( initQueue), dim3(1), dim3(1), 0, 0, d_input_list, dataElements, d_OutVector, (dataElements+1000) * 2);
hipMalloc((void **)&d_Result, sizeof(int) ) ;
hipMemset((void *)d_Result, 0, sizeof(int));
// printf("Run computation kernel!\n");
hipLaunchKernelGGL(( morphReconKernel), dim3(blockNum), dim3(NUM_THREADS), 0, 0, d_Result, d_seeds, d_image, ncols, nrows);
	hipError_t errorCode = hipGetLastError();
	if(errorCode != hipSuccess){
		const char *error = hipGetErrorString(errorCode);
		printf("Error after morphRecon = %s\n", error);
	}
int h_Result;
hipMemcpy(&h_Result, d_Result, sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf(" #queue entries = %d\n",h_Result);
hipFree(d_Result);
hipFree(d_OutVector);
// TODO: free everyone
return h_Result;
}
| d9e13b8956a97ee781858dba5b601d77b83a5bcb.cu | #include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define MAX_NUM_BLOCKS 70
//#include "global_sync.cu"
#define WARP_SIZE 32
#define NUM_THREADS 512
#define NUM_WARPS (NUM_THREADS / WARP_SIZE)
#define LOG_NUM_THREADS 9
#define LOG_NUM_WARPS (LOG_NUM_THREADS - 5)
#define SCAN_STRIDE (WARP_SIZE + WARP_SIZE / 2 + 1)
__device__ volatile int inQueueSize[MAX_NUM_BLOCKS];
__device__ volatile int *inQueuePtr1[MAX_NUM_BLOCKS];
__device__ volatile int inQueueHead[MAX_NUM_BLOCKS];
__device__ volatile int outQueueMaxSize[MAX_NUM_BLOCKS];
__device__ volatile int outQueueHead[MAX_NUM_BLOCKS];
__device__ volatile int *outQueuePtr2[MAX_NUM_BLOCKS];
__device__ volatile int *curInQueue[MAX_NUM_BLOCKS];
__device__ volatile int *curOutQueue[MAX_NUM_BLOCKS];
__device__ volatile int execution_code;
// This variables are used for debugging purposes only
__device__ volatile int totalInserts[MAX_NUM_BLOCKS];
// Utils...
// http://www.moderngpu.com/intro/scan.html
__device__ void scan(const int* values, int* exclusive) {
// Reserve a half warp of extra space plus one per warp in the block.
// This is exactly enough space to avoid comparisons in the multiscan
// and to avoid bank conflicts.
__shared__ volatile int scan[NUM_WARPS * SCAN_STRIDE];
int tid = threadIdx.x;
int warp = tid / WARP_SIZE;
int lane = (WARP_SIZE - 1) & tid;
volatile int* s = scan + SCAN_STRIDE * warp + lane + WARP_SIZE / 2;
s[-16] = 0;
// Read from global memory.
int x = values[tid];
s[0] = x;
// Run inclusive scan on each warp's data.
int sum = x;
#pragma unroll
for(int i = 0; i < 5; ++i) {
int offset = 1<< i;
sum += s[-offset];
s[0] = sum;
}
// Synchronize to make all the totals available to the reduction code.
__syncthreads();
__shared__ volatile int totals[NUM_WARPS + NUM_WARPS / 2];
if(tid < NUM_WARPS) {
// Grab the block total for the tid'th block. This is the last element
// in the block's scanned sequence. This operation avoids bank
// conflicts.
int total = scan[SCAN_STRIDE * tid + WARP_SIZE / 2 + WARP_SIZE - 1];
totals[tid] = 0;
volatile int* s2 = totals + NUM_WARPS / 2 + tid;
int totalsSum = total;
s2[0] = total;
#pragma unroll
for(int i = 0; i < LOG_NUM_WARPS; ++i) {
int offset = 1<< i;
totalsSum += s2[-offset];
s2[0] = totalsSum;
}
// Subtract total from totalsSum for an exclusive scan.
totals[tid] = totalsSum - total;
}
// Synchronize to make the block scan available to all warps.
__syncthreads();
// Add the block scan to the inclusive sum for the block.
sum += totals[warp];
// Write the inclusive and exclusive scans to global memory.
// inclusive[tid] = sum;
exclusive[tid] = sum - x;
}
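// Example: if each thread's `values` entry is the number of elements it wants to enqueue,
// e.g. values = {1, 0, 2, 1, ...}, then the exclusive scan is {0, 1, 1, 3, ...};
// thread i writes starting at offset exclusive[i], and
// exclusive[NUM_THREADS-1] + values[NUM_THREADS-1] gives the total number of elements
// produced by the block.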
__device__ int queueElement(int *outQueueCurPtr, int *elements){
int queue_index = atomicAdd((int*)&outQueueHead[blockIdx.x], 1);
if(queue_index < outQueueMaxSize[blockIdx.x]){
curOutQueue[blockIdx.x][queue_index] = elements[0];
}else{
queue_index = -1;
}
return queue_index;
}
// Assuming that all threads in a block are calling this function
__device__ int queueElement(int *elements){
int queue_index = -1;
#ifdef PREFIX_SUM
__shared__ int writeAddr[NUM_THREADS];
__shared__ int exclusiveScan[NUM_THREADS];
__shared__ int global_queue_index;
if(threadIdx.x == 0){
global_queue_index = outQueueHead[blockIdx.x];
}
	// set to the number of values this thread is writing
writeAddr[threadIdx.x] = elements[0];
// run a prefix-sum on threads inserting data to the queue
scan(writeAddr, exclusiveScan);
// calculate index into the queue where given thread is writing
queue_index = global_queue_index+exclusiveScan[threadIdx.x];
// write elemets sequentially to shared memory
// int localIndex = exclusiveScan[threadIdx.x];
// for(int i = 0; i < elements[0]; i++){
// localElements[localIndex+i] = elements[i+1];
// }
// __syncthreads();
// for(int i = threadIdx.x; i < exclusiveScan[NUM_THREADS-1]+writeAddr[NUM_THREADS-1]; i+=blockDim.x){
// curOutQueue[blockIdx.x][global_queue_index+i] = localElements[i];
// }
for(int i = 0; i < elements[0]; i++){
			// If the queue storage has been exceeded, then set the execution code to 1.
			// This will force a second round of the morphological reconstruction.
if(queue_index+i >= outQueueMaxSize[blockIdx.x]){
// printf("List out of bounds\n");
execution_code=1;
}else{
curOutQueue[blockIdx.x][queue_index+i] = elements[i+1];
}
}
// thread 0 updates head of the queue
if(threadIdx.x == 0){
outQueueHead[blockIdx.x]+=exclusiveScan[NUM_THREADS-1]+writeAddr[NUM_THREADS-1];
if(outQueueHead[blockIdx.x] >= outQueueMaxSize[blockIdx.x]){
outQueueHead[blockIdx.x] = outQueueMaxSize[blockIdx.x];
}
// printf("Inserting = %d - outQueueHead = %d\n", exclusiveScan[NUM_THREADS-1]+writeAddr[NUM_THREADS-1], outQueueHead[blockIdx.x]);
}
#else
if(elements[0] != 0){
queue_index = atomicAdd((int*)&outQueueHead[blockIdx.x], elements[0]);
if(queue_index < outQueueMaxSize[blockIdx.x]){
for(int i = 0; i < elements[0];i++){
curOutQueue[blockIdx.x][queue_index+i] = elements[i+1];
}
}else{
queue_index = -1;
}
}
#endif
return queue_index;
}
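// Note on the two paths above: with PREFIX_SUM defined, the whole block reserves its
// output range with a single block-wide scan and only thread 0 advances outQueueHead;
// without it, every thread holding work performs its own atomicAdd on outQueueHead.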
// Assuming that all threads in a block are calling this function
__device__ int queueElement(int element){
int queue_index = -1;
#ifdef PREFIX_SUM
__shared__ int writeAddr[NUM_THREADS];
__shared__ int exclusiveScan[NUM_THREADS];
__shared__ int global_queue_index;
if(threadIdx.x == 0){
global_queue_index = outQueueHead[blockIdx.x];
}
	// set to 1 for threads that are writing
writeAddr[threadIdx.x] = ((element) != (-1) ? (1):(0));
// run a prefix-sum on threads inserting data to the queue
scan(writeAddr, exclusiveScan);
	// calculate the index into the queue where the given thread is writing
queue_index = global_queue_index+exclusiveScan[threadIdx.x];
// If there is data to be queued, do it
if(element != -1){
curOutQueue[blockIdx.x][queue_index] = element;
}
// thread 0 updates head of the queue
if(threadIdx.x == 0){
outQueueHead[blockIdx.x]+=exclusiveScan[NUM_THREADS-1]+writeAddr[NUM_THREADS-1];
}
#else
if(element != -1){
queue_index = atomicAdd((int*)&outQueueHead[blockIdx.x], 1);
if(queue_index < outQueueMaxSize[blockIdx.x]){
curOutQueue[blockIdx.x][queue_index] = element;
}else{
queue_index = -1;
}
}
#endif
return queue_index;
}
// Makes queue 1 point to queue 2, and vice-versa
__device__ void swapQueues(int loopIt){
__syncthreads();
if(loopIt %2 == 0){
curInQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
curOutQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
if(threadIdx.x == 0){
inQueueSize[blockIdx.x] = outQueueHead[blockIdx.x];
outQueueHead[blockIdx.x] = 0;
inQueueHead[blockIdx.x] = 0;
// This is used for profiling only
totalInserts[blockIdx.x]+=inQueueSize[blockIdx.x];
}
}else{
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
if(threadIdx.x == 0){
inQueueSize[blockIdx.x] = outQueueHead[blockIdx.x];
outQueueHead[blockIdx.x] = 0;
inQueueHead[blockIdx.x] = 0;
// This is used for profiling only
totalInserts[blockIdx.x]+=inQueueSize[blockIdx.x];
}
}
__syncthreads();
}
// -2, nothing else to be done at all
__device__ int dequeueElement(int *loopIt){
// did this block got something to do?
__shared__ volatile int gotWork;
getWork:
gotWork = 0;
// Try to get some work.
// int queue_index = atomicAdd((int*)&inQueueHead, 1);
int queue_index = inQueueHead[blockIdx.x] + threadIdx.x;
// I must guarantee that idle threads are set to 0, and no other thread
// will come later and set it to 0 again
__syncthreads();
if(threadIdx.x == 0){
inQueueHead[blockIdx.x]+=blockDim.x;
// if(loopIt[0] < 1){
// printf("inQueueSize = %d loopIt[0] = %d queue_index = %d outQueueHead = %d\n", inQueueSize[blockIdx.x], loopIt[0], queue_index, outQueueHead[blockIdx.x]);
// }
}
// Nothing to do by default
int element = -1;
if(queue_index < inQueueSize[blockIdx.x]){
element = curInQueue[blockIdx.x][queue_index];
gotWork = 1;
}
__syncthreads();
// This block does not have anything to process
if(!gotWork){
// if(loopIt[0] < 20 && threadIdx.x == 0)
// printf("inQueueSize = %d loopIt[0] = %d\n", inQueueSize[blockIdx.x], loopIt[0]);
element = -2;
if(outQueueHead[blockIdx.x] != 0){
swapQueues(loopIt[0]);
loopIt[0]++;
goto getWork;
}
}
return element;
}
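// dequeueElement() semantics: each thread claims one element of the block's input queue.
// It returns -1 when this particular thread got nothing in this pass, swaps the ping-pong
// queues and retries when the input queue is exhausted but the output queue still holds
// freshly produced work, and returns -2 only when both queues are empty, i.e. the block
// is completely done.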
// Initializes the queue data structures.
// Initial assumption: this first kernel should be launched with a number of threads at least equal
// to the number of blocks used with the second kernel.
// The inQueueData buffer is assumed to be as large as the outQueueMaxSize provided.
__global__ void initQueue(int *inQueueData, int dataElements, int *outQueueData, int outMaxSize){
if(threadIdx.x < 1){
// Simply assign input data pointers/number of elements to the queue
inQueuePtr1[threadIdx.x] = inQueueData;
// printf("initQueueVector: tid - %d dataElements = %d pointer = %p\n", threadIdx.x, dataElements, inQueueData);
inQueueSize[threadIdx.x] = dataElements;
totalInserts[threadIdx.x] = 0;
// alloc second vector used to queue output elements
outQueuePtr2[threadIdx.x] = outQueueData;
// Maximum number of elements that fit into the queue
outQueueMaxSize[threadIdx.x] = outMaxSize;
// Head of the out queue
outQueueHead[threadIdx.x] = 0;
// Head of the in queue
inQueueHead[threadIdx.x] = 0;
}
}
__global__ void initQueueId(int *inQueueData, int dataElements, int *outQueueData, int outMaxSize, int qId){
if(threadIdx.x < 1){
// Simply assign input data pointers/number of elements to the queue
inQueuePtr1[qId] = inQueueData;
// printf("initQueueVector: tid - %d dataElements = %d pointer = %p\n", threadIdx.x, dataElements, inQueueData);
inQueueSize[qId] = dataElements;
totalInserts[qId] = 0;
// alloc second vector used to queue output elements
outQueuePtr2[qId] = outQueueData;
// Maximum number of elements that fit into the queue
outQueueMaxSize[qId] = outMaxSize;
// Head of the out queue
outQueueHead[qId] = 0;
// Head of the in queue
inQueueHead[qId] = 0;
execution_code=0;
}
}
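// initQueueId() wires up the per-block queue with index qId: block qId of the compute
// kernel will consume inQueueData (dataElements entries) and append to outQueueData,
// which holds at most outMaxSize entries before an overflow is flagged via execution_code.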
__global__ void initQueueVector(int **inQueueData, int *inQueueSizes, int **outQueueData, int numImages){
if(threadIdx.x < MAX_NUM_BLOCKS && threadIdx.x < numImages){
// printf("initQueueVector: tid - %d inQueueSize[%d] = %d pointer = %p outPtr = %p\n", threadIdx.x, threadIdx.x, inQueueSizes[threadIdx.x], inQueueData[threadIdx.x], outQueueData[threadIdx.x]);
// Simply assign input data pointers/number of elements to the queue
inQueuePtr1[threadIdx.x] = inQueueData[threadIdx.x];
inQueueSize[threadIdx.x] = inQueueSizes[threadIdx.x];
totalInserts[threadIdx.x] = 0;
// alloc second vector used to queue output elements
outQueuePtr2[threadIdx.x] = outQueueData[threadIdx.x];
// Maximum number of elements that fit into the queue
outQueueMaxSize[threadIdx.x] = (inQueueSizes[threadIdx.x]+1000) * 2;
// Head of the out queue
outQueueHead[threadIdx.x] = 0;
// Head of the in queue
inQueueHead[threadIdx.x] = 0;
}
}
// Returns what should be queued
__device__ int propagate(int *seeds, unsigned char *image, int x, int y, int ncols, unsigned char pval){
int returnValue = -1;
int index = y*ncols + x;
unsigned char seedXYval = seeds[index];
unsigned char imageXYval = image[index];
if((seedXYval < pval) && (imageXYval != seedXYval)){
unsigned char newValue = min(pval, imageXYval);
// this should be a max atomic...
atomicMax(&(seeds[index]), newValue);
returnValue = index;
}
return returnValue;
}
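// propagate() is the elementary flooding step of the morphological reconstruction:
// if the marker (seeds) value at (x, y) is still below the propagated value pval and
// differs from the mask image, it is raised with atomicMax to min(pval, image value)
// and the pixel index is returned so the caller can re-queue it; otherwise -1 is returned.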
__global__ void listReduceKernel(int* d_Result, int *seeds, unsigned char *image, int ncols, int nrows){
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
int loopIt = 0;
int workUnit = -1;
int tid = threadIdx.x;
__shared__ int localQueue[NUM_THREADS][5];
do{
int x, y;
localQueue[tid][0] = 0;
// Try to get some work.
workUnit = dequeueElement(&loopIt);
y = workUnit/ncols;
x = workUnit%ncols;
unsigned char pval = 0;
if(workUnit >= 0){
pval = seeds[workUnit];
}
int retWork = -1;
if(workUnit > 0){
retWork = propagate((int*)seeds, image, x, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit > 0){
retWork = propagate((int*)seeds, image, x, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit > 0){
retWork = propagate((int*)seeds, image, x-1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit > 0){
retWork = propagate((int*)seeds, image, x+1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
queueElement(localQueue[tid]);
}while(workUnit != -2);
d_Result[0]=totalInserts[blockIdx.x];
}
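// Note: unlike morphReconKernel below, listReduceKernel only tests workUnit > 0 before
// visiting the four neighbors, so out-of-range accesses are possible for pixels that sit
// on the image border.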
extern "C" int listComputation(int *h_Data, int dataElements, int *d_seeds, unsigned char *d_image, int ncols, int nrows){
// seeds contais the maker and it is also the output image
// uint threadsX = 512;
int blockNum = 1;
int *d_Result;
int *d_Data;
unsigned int dataSize = dataElements * sizeof(int);
cudaMalloc((void **)&d_Data, dataSize );
cudaMemcpy(d_Data, h_Data, dataSize, cudaMemcpyHostToDevice);
// alloc space to save output elements in the queue
int *d_OutVector;
cudaMalloc((void **)&d_OutVector, sizeof(int) * dataElements);
// printf("Init queue data!\n");
// init values of the __global__ variables used by the queue
initQueue<<<1, 1>>>(d_Data, dataElements, d_OutVector, dataElements);
// init_sync<<<1, 1>>>();
cudaMalloc((void **)&d_Result, sizeof(int) ) ;
cudaMemset((void *)d_Result, 0, sizeof(int));
// printf("Run computation kernel!\n");
listReduceKernel<<<blockNum, NUM_THREADS>>>(d_Result, d_seeds, d_image, ncols, nrows);
// cutilCheckMsg("histogramKernel() execution failed\n");
int h_Result;
cudaMemcpy(&h_Result, d_Result, sizeof(int), cudaMemcpyDeviceToHost);
printf(" #queue entries = %d\n",h_Result);
cudaFree(d_Data);
cudaFree(d_Result);
cudaFree(d_OutVector);
// TODO: free everyone
return h_Result;
}
__global__ void morphReconKernel(int* d_Result, int *seeds, unsigned char *image, int ncols, int nrows){
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
int loopIt = 0;
int workUnit = -1;
int tid = threadIdx.x;
__shared__ int localQueue[NUM_THREADS][5];
// printf("inQueueSize = %d\n",inQueueSize[blockIdx.x]);
__syncthreads();
do{
int x, y;
localQueue[tid][0] = 0;
// Try to get some work.
workUnit = dequeueElement(&loopIt);
y = workUnit/ncols;
x = workUnit%ncols;
unsigned char pval = 0;
if(workUnit >=0){
pval = seeds[workUnit];
}
int retWork = -1;
if(workUnit >= 0 && y > 0){
retWork = propagate((int*)seeds, image, x, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && y < nrows-1){
retWork = propagate((int*)seeds, image, x, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x > 0){
retWork = propagate((int*)seeds, image, x-1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
queueElement(localQueue[tid]);
}while(workUnit != -2);
d_Result[0]=totalInserts[blockIdx.x];
}
__global__ void morphReconKernelVector(int* d_Result, int **d_SeedsList, unsigned char **d_ImageList, int *d_ncols, int *d_nrows, int connectivity=4){
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
// if(threadIdx.x == 0){
// printf("inqueue = %p outqueue = %p ncols = %d nrows = %d connectivity=%d\n", inQueuePtr1[blockIdx.x], outQueuePtr2[blockIdx.x], d_ncols[blockIdx.x], d_nrows[blockIdx.x], connectivity);
// }
int *seeds = d_SeedsList[blockIdx.x];
unsigned char *image = d_ImageList[blockIdx.x];
int ncols = d_ncols[blockIdx.x];
int nrows = d_nrows[blockIdx.x];
int loopIt = 0;
int workUnit = -1;
int tid = threadIdx.x;
__shared__ int localQueue[NUM_THREADS][9];
__syncthreads();
do{
int x, y;
localQueue[tid][0] = 0;
// Try to get some work.
workUnit = dequeueElement(&loopIt);
y = workUnit/ncols;
x = workUnit%ncols;
unsigned char pval = 0;
if(workUnit >= 0){
pval = seeds[workUnit];
}
int retWork = -1;
if(workUnit >= 0 && y > 0){
retWork = propagate((int*)seeds, image, x, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && y < nrows-1){
retWork = propagate((int*)seeds, image, x, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x > 0){
retWork = propagate((int*)seeds, image, x-1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// if connectivity is 8, four other neighbors have to be verified
if(connectivity == 8){
if(workUnit >= 0 && y > 0 && x >0){
retWork = propagate((int*)seeds, image, x-1, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y > 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y < (nrows-1) && x >0){
retWork = propagate((int*)seeds, image, x-1, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y < (nrows-1) && x <(ncols-1)){
retWork = propagate((int*)seeds, image, x+1, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
}
queueElement(localQueue[tid]);
}while(workUnit != -2);
d_Result[blockIdx.x]=totalInserts[blockIdx.x];
}
/// This is an old implementation for this function. Presumably about 1ms faster, but quite more ugly
///extern "C" int morphReconVector(int nImages, int **h_InputListPtr, int* h_ListSize, int **h_Seeds, unsigned char **h_Images, int* h_ncols, int* h_nrows){
///// seeds contais the maker and it is also the output image
/// int blockNum = nImages;
/// int *d_Result;
///
/// // alloc space to save output elements in the queue
/// int **h_OutQueuePtr = (int **)malloc(sizeof(int*) * nImages);;
///
/// for(int i = 0; i < nImages;i++){
/// cudaMalloc((void **)&h_OutQueuePtr[i], sizeof(int) * (h_ListSize[i]+1000) * 2);
/// }
///
/// int **d_OutQueuePtr = NULL;
/// cudaMalloc((void **)&d_OutQueuePtr, sizeof(int*) * nImages);
/// cudaMemcpy(d_OutQueuePtr, h_OutQueuePtr, sizeof(int*) * nImages, cudaMemcpyHostToDevice);
///
///
///
/// printf("nImages = %d\n", nImages);
///
/// int **d_InputListPtr = NULL;
/// cudaMalloc((void **)&d_InputListPtr, sizeof(int*) * nImages);
/// cudaMemcpy(d_InputListPtr, h_InputListPtr, sizeof(int*) * nImages, cudaMemcpyHostToDevice);
///
///
/// int *d_ListSize = NULL;
/// cudaMalloc((void **)&d_ListSize, sizeof(int) * nImages);
/// cudaMemcpy(d_ListSize, h_ListSize, sizeof(int) * nImages, cudaMemcpyHostToDevice);
///
/// // init values of the __global__ variables used by the queue
/// initQueueVector<<<1, nImages>>>(d_InputListPtr, d_ListSize, d_OutQueuePtr, nImages);
///
/// cudaMalloc((void **)&d_Result, sizeof(int)*nImages) ;
/// cudaMemset((void *)d_Result, 0, sizeof(int)*nImages);
///
/// int **d_Seeds = NULL;
/// cudaMalloc((void **)&d_Seeds, sizeof(int*) * nImages);
/// cudaMemcpy(d_Seeds, h_Seeds, sizeof(int*) * nImages, cudaMemcpyHostToDevice);
///
/// unsigned char **d_Images = NULL;
/// cudaMalloc((void **)&d_Images, sizeof(unsigned char*) * nImages);
/// cudaMemcpy(d_Images, h_Images, sizeof(unsigned char*) * nImages, cudaMemcpyHostToDevice);
///
/// int *d_ncols = NULL;
/// cudaMalloc((void **)&d_ncols, sizeof(int) * nImages);
/// cudaMemcpy(d_ncols, h_ncols, sizeof(int) * nImages, cudaMemcpyHostToDevice);
///
/// int *d_nrows = NULL;
/// cudaMalloc((void **)&d_nrows, sizeof(int) * nImages);
/// cudaMemcpy(d_nrows, h_nrows, sizeof(int) * nImages, cudaMemcpyHostToDevice);
///
/// printf("Run computation kernel!\n");
/// morphReconKernelVector<<<blockNum, NUM_THREADS>>>(d_Result, d_Seeds, d_Images, d_ncols, d_nrows);
///
/// cudaError_t errorCode = cudaGetLastError();
/// const char *error = cudaGetErrorString(errorCode);
/// printf("Error after morphRecon = %s\n", error);
///
/// int h_Result;
/// cudaMemcpy(&h_Result, d_Result, sizeof(int), cudaMemcpyDeviceToHost);
///
/// printf(" #queue entries = %d\n",h_Result);
///
////// cudaFree(d_nrows);
////// cudaFree(d_ncols);
////// cudaFree(d_Images);
////// cudaFree(d_Seeds);
////// cudaFree(d_InputListPtr);
////// cudaFree(d_ListSize);
////// cudaFree(d_Result);
////// cudaFree(d_OutQueuePtr);
////// for(int i = 0; i < nImages; i++){
////// cudaFree(h_OutQueuePtr[i]);
////// cudaFree(h_InputListPtr[i]);
////// }
////// free(h_OutQueuePtr);
///
/// // TODO: free everyone
/// return h_Result;
///}
extern "C" int morphReconVector(int nImages, int **h_InputListPtr, int* h_ListSize, int **h_Seeds, unsigned char **h_Images, int* h_ncols, int* h_nrows, int connectivity){
	// seeds contains the marker and it is also the output image
int blockNum = nImages;
int *d_Result;
// alloc space to save output elements in the queue
	int **h_OutQueuePtr = (int **)malloc(sizeof(int*) * nImages);
for(int i = 0; i < nImages;i++){
cudaMalloc((void **)&h_OutQueuePtr[i], sizeof(int) * (h_ListSize[i]+1000) * 2);
}
	// Init the queue for each image. Yes, this may not be the most efficient way, but the code is far easier to read.
	// Another version, where all pointers are copied to the GPU at once, was also built, but it was only about 1ms
	// faster. Thus, we decided to go with this version.
for(int i = 0; i < nImages;i++)
initQueueId<<<1, 1>>>(h_InputListPtr[i], h_ListSize[i], h_OutQueuePtr[i], (h_ListSize[i]+1000) *2, i);
cudaMalloc((void **)&d_Result, sizeof(int)*nImages) ;
cudaMemset((void *)d_Result, 0, sizeof(int)*nImages);
int **d_Seeds = NULL;
cudaMalloc((void **)&d_Seeds, sizeof(int*) * nImages);
cudaMemcpy(d_Seeds, h_Seeds, sizeof(int*) * nImages, cudaMemcpyHostToDevice);
unsigned char **d_Images = NULL;
cudaMalloc((void **)&d_Images, sizeof(unsigned char*) * nImages);
cudaMemcpy(d_Images, h_Images, sizeof(unsigned char*) * nImages, cudaMemcpyHostToDevice);
int *d_ncols = NULL;
cudaMalloc((void **)&d_ncols, sizeof(int) * nImages);
cudaMemcpy(d_ncols, h_ncols, sizeof(int) * nImages, cudaMemcpyHostToDevice);
int *d_nrows = NULL;
cudaMalloc((void **)&d_nrows, sizeof(int) * nImages);
cudaMemcpy(d_nrows, h_nrows, sizeof(int) * nImages, cudaMemcpyHostToDevice);
// printf("Run computation kernel!\n");
morphReconKernelVector<<<blockNum, NUM_THREADS>>>(d_Result, d_Seeds, d_Images, d_ncols, d_nrows, connectivity);
	// cudaGetLastError() clears the error state, so capture it once and reuse it
	cudaError_t errorCode = cudaGetLastError();
	if(errorCode != cudaSuccess){
		const char *error = cudaGetErrorString(errorCode);
		printf("Error after morphRecon = %s\n", error);
	}
int *h_Result = (int *) malloc(sizeof(int) * blockNum);
cudaMemcpy(h_Result, d_Result, sizeof(int) * blockNum, cudaMemcpyDeviceToHost);
int resutRet = h_Result[0];
// printf(" #queue entries = %d\n",h_Result[0]);
free(h_Result);
cudaFree(d_nrows);
cudaFree(d_ncols);
cudaFree(d_Images);
cudaFree(d_Seeds);
cudaFree(d_Result);
for(int i = 0; i < nImages; i++){
cudaFree(h_OutQueuePtr[i]);
cudaFree(h_InputListPtr[i]);
}
free(h_OutQueuePtr);
return resutRet;
}
__global__ void morphReconKernelSpeedup(int* d_Result, int *d_Seeds, unsigned char *d_Image, int ncols, int nrows, int connectivity=4){
curInQueue[blockIdx.x] = inQueuePtr1[blockIdx.x];
curOutQueue[blockIdx.x] = outQueuePtr2[blockIdx.x];
int *seeds = d_Seeds;
unsigned char *image = d_Image;
// if(threadIdx.x == 0){
// printf("inqueue = %p outqueue = %p ncols = %d nrows = %d connectivity=%d\n", inQueuePtr1[blockIdx.x], outQueuePtr2[blockIdx.x], ncols, nrows, connectivity);
// }
// int *seeds = d_SeedsList[blockIdx.x];
// unsigned char *image = d_ImageList[blockIdx.x];
// int ncols = d_ncols[blockIdx.x];
// int nrows = d_nrows[blockIdx.x];
int loopIt = 0;
int workUnit = -1;
int tid = threadIdx.x;
__shared__ int localQueue[NUM_THREADS][9];
__syncthreads();
do{
int x, y;
localQueue[tid][0] = 0;
// Try to get some work.
workUnit = dequeueElement(&loopIt);
y = workUnit/ncols;
x = workUnit%ncols;
unsigned char pval = 0;
if(workUnit >= 0){
pval = seeds[workUnit];
}
int retWork = -1;
if(workUnit >= 0 && y > 0){
retWork = propagate((int*)seeds, image, x, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && y < nrows-1){
retWork = propagate((int*)seeds, image, x, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x > 0){
retWork = propagate((int*)seeds, image, x-1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// queueElement(retWork);
if(workUnit >= 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
// if connectivity is 8, four other neighbors have to be verified
if(connectivity == 8){
if(workUnit >= 0 && y > 0 && x >0){
retWork = propagate((int*)seeds, image, x-1, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y > 0 && x < ncols-1){
retWork = propagate((int*)seeds, image, x+1, y-1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y < (nrows-1) && x >0){
retWork = propagate((int*)seeds, image, x-1, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
if(workUnit >= 0 && y < (nrows-1) && x <(ncols-1)){
retWork = propagate((int*)seeds, image, x+1, y+1, ncols, pval);
if(retWork > 0){
localQueue[tid][0]++;
localQueue[tid][localQueue[tid][0]] = retWork;
}
}
}
// queueElement(retWork);
queueElement(localQueue[tid]);
}while(workUnit != -2);
d_Result[blockIdx.x]=totalInserts[blockIdx.x];
if(execution_code!=0){
d_Result[gridDim.x]=1;
}
}
extern "C" int morphReconSpeedup( int *g_InputListPtr, int h_ListSize, int *g_Seed, unsigned char *g_Image, int h_ncols, int h_nrows, int connectivity, int nBlocks, float queue_increase_factor){
// seeds contais the maker and it is also the output image
// int nImages = 1;
// TODO: change blockNum to nBlocks
// int nBlocks = nImages;
int *d_Result;
// int *d_return_code;
// float queue_increase_factor = 1.1;
// int nBlocks = 28;
// printf("nBlocks=%d\n",nBlocks);
// alloc space to save output elements in the queue for each block
int **h_OutQueuePtr = (int **)malloc(sizeof(int*) * nBlocks);
// at this moment I should partition the INPUT queue
// printf("List size = %d\n", h_ListSize);
int tempNblocks = nBlocks;
int subListsInit[tempNblocks];
// int subListsEnd[tempNblocks];
int subListsSize[tempNblocks];
for(int i = 0; i < tempNblocks; i++){
int curSubListInit = (h_ListSize/tempNblocks)*i;
int curSubListEnd = ((i+1<tempNblocks)?((i+1)*(h_ListSize/tempNblocks)-1):(h_ListSize-1));
// printf("BlockId = %d - init = %d end = %d size=%d\n", i, curSubListInit, curSubListEnd, curSubListEnd-curSubListInit+1);
subListsInit[i] = curSubListInit;
// subListsEnd[i] = curSubListEnd;
subListsSize[i] = curSubListEnd-curSubListInit+1;
}
// Adding code
// TODO: free data
int *blockSubLists[tempNblocks];
for(int i = 0; i < tempNblocks; i++){
cudaMalloc((void **)&blockSubLists[i], sizeof(int)*(subListsSize[i]) * queue_increase_factor);
cudaMemcpy(blockSubLists[i], &g_InputListPtr[subListsInit[i]], subListsSize[i] * sizeof(int), cudaMemcpyDeviceToDevice);
}
// End adding code
// printf("h_listSize = %d subListsSize[0]=%d\n", h_ListSize, subListsSize[0]);
// cout << "h_listSize = "<< h_ListSize<< " subListsSize[0]="<< subListsSize[0] <<endl;
for(int i = 0; i < tempNblocks;i++){
cudaMalloc((void **)&h_OutQueuePtr[i], sizeof(int) * (subListsSize[i]) * queue_increase_factor);
}
	// Init the queue for each block. Yes, this may not be the most efficient way, but the code is far easier to read.
	// Another version, where all pointers are copied to the GPU at once, was also built, but it was only about 1ms
	// faster. Thus, we decided to go with this version.
// for(int i = 0; i < nBlocks;i++)
// initQueueId<<<1, 1>>>(h_InputListPtr[i], h_ListSize[i], h_OutQueuePtr[i], (h_ListSize[i]+1000) *2, i);
for(int i = 0; i < nBlocks;i++)
initQueueId<<<1, 1>>>(blockSubLists[i], subListsSize[i], h_OutQueuePtr[i], (subListsSize[i]) *queue_increase_factor, i);
// initQueueId<<<1, 1>>>(g_InputListPtr, h_ListSize, h_OutQueuePtr[i], (h_ListSize+1000) *2, i);
// This is used by each block to store the number of queue operations performed
cudaMalloc((void **)&d_Result, sizeof(int)*(nBlocks+1)) ;
cudaMemset((void *)d_Result, 0, sizeof(int)*(nBlocks+1));
// printf("Run computation kernel!\n");
morphReconKernelSpeedup<<<nBlocks, NUM_THREADS>>>(d_Result, g_Seed, g_Image, h_ncols, h_nrows, connectivity);
	// cudaGetLastError() clears the error state, so capture it once and reuse it
	cudaError_t errorCode = cudaGetLastError();
	if(errorCode != cudaSuccess){
		const char *error = cudaGetErrorString(errorCode);
		printf("Error after morphRecon = %s\n", error);
	}
int *h_Result = (int *) malloc(sizeof(int) * (nBlocks+1));
cudaMemcpy(h_Result, d_Result, sizeof(int) * (nBlocks+1), cudaMemcpyDeviceToHost);
int resutRet = h_Result[nBlocks];
// for(int i = 0; i < nBlocks; i++){
// printf(" block# %d, #entries=%d\n", i, h_Result[i]);
// }
// printf(" Exec. Error code = %d\n", h_Result[nBlocks]);
free(h_Result);
cudaFree(d_Result);
for(int i = 0; i < nBlocks; i++){
cudaFree(h_OutQueuePtr[i]);
}
free(h_OutQueuePtr);
cudaFree(g_InputListPtr);
return resutRet;
}
extern "C" int morphRecon(int *d_input_list, int dataElements, int *d_seeds, unsigned char *d_image, int ncols, int nrows){
	// seeds contains the marker and it is also the output image
int blockNum = 1;
int *d_Result;
// alloc space to save output elements in the queue
int *d_OutVector;
cudaMalloc((void **)&d_OutVector, sizeof(int) * (dataElements+1000) * 2 );
// init values of the __global__ variables used by the queue
initQueue<<<1, 1>>>(d_input_list, dataElements, d_OutVector, (dataElements+1000) * 2);
cudaMalloc((void **)&d_Result, sizeof(int) ) ;
cudaMemset((void *)d_Result, 0, sizeof(int));
// printf("Run computation kernel!\n");
morphReconKernel<<<blockNum, NUM_THREADS>>>(d_Result, d_seeds, d_image, ncols, nrows);
	cudaError_t errorCode = cudaGetLastError();
	if(errorCode != cudaSuccess){
		const char *error = cudaGetErrorString(errorCode);
		printf("Error after morphRecon = %s\n", error);
	}
int h_Result;
cudaMemcpy(&h_Result, d_Result, sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf(" #queue entries = %d\n",h_Result);
cudaFree(d_Result);
cudaFree(d_OutVector);
// TODO: free everyone
return h_Result;
}
|
61b62419cbd4e560650e9a8422df90d272a9e5ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAKernels.h"
#include <cstdio>
const size_t convolutionSize = 7;
__constant__ float convolutionKernel[convolutionSize]{ 2, -27, 270, -490, 270, -27, 2 };
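// These look like the 6th-order central-difference coefficients for the second
// derivative, i.e. (2, -27, 270, -490, 270, -27, 2) / (180*h^2); the 1/(180*h^2)
// factor is presumably folded into the `scale` argument by the caller.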
__global__ void convolveArrayHorizontal(float* input, float* output, size_t dataPitch, int XX, int YY, float scale)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
float* row = (float *)((char*)output + tidy * dataPitch);
float* in = (float *)((char*)input + tidy * dataPitch);
float value = 0.0;
for (int i = 0; i < convolutionSize; i++)
{
int ax = tidx + i - convolutionSize / 2;
value += ax >= 0 ? (ax < XX ? convolutionKernel[i] * in[ax] : 0.0f) : 0.0f;
}
row[tidx] = value * scale;
}
}
__global__ void convolveArrayVertical(float* input, float* output, size_t dataPitch, int XX, int YY, float scale)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
float* row = (float *)((char*)output + tidy * dataPitch);
float* in = (float *)((char*)input + tidy * dataPitch);
float value = 0.0;
for (int i = 0; i < convolutionSize; i++)
{
int ay = tidx + (i - convolutionSize / 2) * (dataPitch / sizeof(float));
int yy = (sizeof(float)) * ay / dataPitch + tidy;
value += yy >= 0 ? (yy < YY ? convolutionKernel[i] * in[ay] : 0.0f) : 0.0f;
}
row[tidx] = value * scale;
}
}
__global__ void add(float* a, float* b, float* out, float sa, float sb, size_t XX, size_t YY)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
out[tidy*XX + tidx] = a[tidy*XX + tidx] * sa + b[tidy*XX + tidx] * sb;
}
}
__global__ void mult(float* a, float* b, float* out, float scale, size_t XX, size_t YY)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
out[tidy*XX + tidx] = a[tidy*XX + tidx] * b[tidy*XX + tidx] * scale;
}
}
__global__ void applyBoundary(float* positions, float* boundary, float* out, float amount, size_t XX, size_t YY)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
out[tidy*XX + tidx] = positions[tidy*XX + tidx] * (boundary[tidy*XX + tidx] * amount + (1.0f - amount));
}
}
__global__ void gaussianAdd(float2 position, float2 boundary, float size, float mult, float* out, size_t width, size_t height, size_t pitch)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < width) && (tidy < height))
{
// np.exp(-pow(x - position[0], 2) / (2 * size * size) - pow(y - position[1], 2) / (2 * size * size))
float x = boundary.x * (float) tidx / width;
float y = boundary.y * (float) tidy / height;
float n = 2 * size * size;
out[tidy*(pitch / sizeof(float)) + tidx] += mult * expf( - powf(x - position.x, 2) / n - powf(y - position.y, 2) / n);
}
}
__global__ void writeBack(float* d_position, float* deviceResults, float2 destPosition, float2 boundary, size_t iteration, size_t width, size_t height)
{
float ix = destPosition.x / boundary.x;
float iy = destPosition.y / boundary.y;
size_t indexX = floor(ix * width);
size_t indexY = floor(iy * height);
deviceResults[iteration] = d_position[width * indexY + indexX];
} | 61b62419cbd4e560650e9a8422df90d272a9e5ab.cu | #include "CUDAKernels.h"
#include <cstdio>
const size_t convolutionSize = 7;
__constant__ float convolutionKernel[convolutionSize]{ 2, -27, 270, -490, 270, -27, 2 };
__global__ void convolveArrayHorizontal(float* input, float* output, size_t dataPitch, int XX, int YY, float scale)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
float* row = (float *)((char*)output + tidy * dataPitch);
float* in = (float *)((char*)input + tidy * dataPitch);
float value = 0.0;
for (int i = 0; i < convolutionSize; i++)
{
int ax = tidx + i - convolutionSize / 2;
value += ax >= 0 ? (ax < XX ? convolutionKernel[i] * in[ax] : 0.0f) : 0.0f;
}
row[tidx] = value * scale;
}
}
__global__ void convolveArrayVertical(float* input, float* output, size_t dataPitch, int XX, int YY, float scale)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
float* row = (float *)((char*)output + tidy * dataPitch);
float* in = (float *)((char*)input + tidy * dataPitch);
float value = 0.0;
for (int i = 0; i < convolutionSize; i++)
{
int ay = tidx + (i - convolutionSize / 2) * (dataPitch / sizeof(float));
int yy = (sizeof(float)) * ay / dataPitch + tidy;
value += yy >= 0 ? (yy < YY ? convolutionKernel[i] * in[ay] : 0.0f) : 0.0f;
}
row[tidx] = value * scale;
}
}
__global__ void add(float* a, float* b, float* out, float sa, float sb, size_t XX, size_t YY)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
out[tidy*XX + tidx] = a[tidy*XX + tidx] * sa + b[tidy*XX + tidx] * sb;
}
}
__global__ void mult(float* a, float* b, float* out, float scale, size_t XX, size_t YY)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
out[tidy*XX + tidx] = a[tidy*XX + tidx] * b[tidy*XX + tidx] * scale;
}
}
__global__ void applyBoundary(float* positions, float* boundary, float* out, float amount, size_t XX, size_t YY)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < XX) && (tidy < YY))
{
out[tidy*XX + tidx] = positions[tidy*XX + tidx] * (boundary[tidy*XX + tidx] * amount + (1.0f - amount));
}
}
__global__ void gaussianAdd(float2 position, float2 boundary, float size, float mult, float* out, size_t width, size_t height, size_t pitch)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
if ((tidx < width) && (tidy < height))
{
// np.exp(-pow(x - position[0], 2) / (2 * size * size) - pow(y - position[1], 2) / (2 * size * size))
float x = boundary.x * (float) tidx / width;
float y = boundary.y * (float) tidy / height;
float n = 2 * size * size;
out[tidy*(pitch / sizeof(float)) + tidx] += mult * expf( - powf(x - position.x, 2) / n - powf(y - position.y, 2) / n);
}
}
__global__ void writeBack(float* d_position, float* deviceResults, float2 destPosition, float2 boundary, size_t iteration, size_t width, size_t height)
{
float ix = destPosition.x / boundary.x;
float iy = destPosition.y / boundary.y;
size_t indexX = floor(ix * width);
size_t indexY = floor(iy * height);
deviceResults[iteration] = d_position[width * indexY + indexX];
} |
a3857a5c6e7027590d6c162dc89b567f020612ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void tovalue_kernal(float* data, const float value, const int totaltc)
{
const uint idx = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*MAX_THREADS;
if(idx < totaltc){
data[idx] = value;
}
} | a3857a5c6e7027590d6c162dc89b567f020612ae.cu | #include "includes.h"
__global__ void tovalue_kernal(float* data, const float value, const int totaltc)
{
const uint idx = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*MAX_THREADS;
if(idx < totaltc){
data[idx] = value;
}
} |
90cd4a05cf96378ae7cc7ed223a53efee17d7462.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define CELL_WIDTH_FACTOR 2.0f
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 200
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos_buffer;
glm::vec3 *dev_vel1_buffer;
glm::vec3 *dev_vel2_buffer;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
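// hash() is an integer mixing function (a Bob Jenkins style 32-bit hash) used to turn
// (index * time) into a well-scrambled seed, so that different boid indices get
// uncorrelated random number streams.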
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
//initialize numObjects here
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = CELL_WIDTH_FACTOR * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
//these two arrays only use to store cell information, so no need to be as many as numObjects
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
//2.3 additional buffer to help reshuffle
hipMalloc((void**)&dev_pos_buffer, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos_buffer failed!");
hipMalloc((void**)&dev_vel1_buffer, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1_buffer failed!");
hipMalloc((void**)&dev_vel2_buffer, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2_buffer failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
glm::vec3 perceived_center(0.f,0.f,0.f);
glm::vec3 avoidance_velocity(0.f, 0.f, 0.f);
glm::vec3 perceived_velocity(0.f, 0.f, 0.f);
glm::vec3 return_vel(0.f, 0.f, 0.f);
int neighbor_count_rule1 = 0;
int neighbor_count_rule3 = 0;
//pre load all needed data
glm::vec3 curr_boid_pos = pos[iSelf];
for (int idx = 0; idx < N; ++idx)
{
//if b = boid skip the rest actions
if (idx == iSelf) continue;
//load current boid pos
glm::vec3 idx_boid_pos = pos[idx];
float dist = glm::distance(idx_boid_pos, curr_boid_pos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist <= rule1Distance)
{
perceived_center += idx_boid_pos;
neighbor_count_rule1++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist <= rule2Distance)
{
avoidance_velocity -= (idx_boid_pos - curr_boid_pos);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist <= rule3Distance)
{
perceived_velocity += vel[idx];
neighbor_count_rule3++;
}
}
  //if we divide by N-1 instead of the neighbor count, the particles will shrink to the center of the cube -- helped by Hannar
glm::vec3 rule1_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule2_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule3_component = glm::vec3(0.f, 0.f, 0.f);
if (neighbor_count_rule1 > 0)
{
rule1_component = (perceived_center / (float)neighbor_count_rule1 - curr_boid_pos) * rule1Scale;
}
rule2_component = avoidance_velocity * rule2Scale;
if (neighbor_count_rule3 > 0)
{
rule3_component = (perceived_velocity / (float)neighbor_count_rule3) * rule3Scale;
}
  // sum the current velocity and the contributions of all three rules (see the Rules part of the README) -- helped by Hanna
return_vel += vel[iSelf] + rule1_component + rule2_component + rule3_component;
return return_vel;
}
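// In short, the returned velocity is vel[iSelf]
// + rule1Scale * (average neighbor position - own position)
// + rule2Scale * (summed separation offsets)
// + rule3Scale * (average neighbor velocity),
// where the rule 1 and rule 3 terms are zero when no neighbor lies within the respective
// radius; clamping to maxSpeed is left to the caller.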
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
//Compute the index of current thread
int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N)
{
return;
}
glm::vec3 new_velocity = computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
float curr_speed = glm::length(new_velocity);
  //if the speed is larger than maxSpeed, normalize the velocity and scale it to the maxSpeed we allow; glm::length() is never negative, so there is no negative-speed case to handle
if (curr_speed > maxSpeed)
{
new_velocity = glm::normalize(new_velocity) * maxSpeed;
}
// Record the new velocity into vel2. Question: why NOT vel1? --- because other boids might need that
vel2[index] = new_velocity;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x) tick
// for(y)
// for(z)? Or some other order?
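// Note: gridIndex3Dto1D below varies fastest in x, so iterating x in the innermost
// loop visits consecutive 1D cell indices (and, for cell-coherent data, consecutive memory).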
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
//compute the cell index along each axis and combine them with gridIndex3Dto1D into a 1D index -- inverseCellWidth is used because a multiply is cheaper than a divide
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
//pre store info
glm::vec3 curr_pos = pos[idx];
int idx_x = (curr_pos.x - gridMin.x) * inverseCellWidth;
int idx_y = (curr_pos.y - gridMin.y) * inverseCellWidth;
int idx_z = (curr_pos.z - gridMin.z) * inverseCellWidth;
//combine to get the 1D index
int gridIndex = gridIndex3Dto1D(idx_x, idx_y, idx_z, gridResolution);
//store to indices and gridIndices correspondingly
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[idx] = idx;
gridIndices[idx] = gridIndex;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
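// -1 is the "empty cell" sentinel written by kernResetIntBuffer; the neighbor-search
// kernels skip any cell whose start/end index is still -1.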
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
//what happens for cells that contain no particles? they keep the -1 sentinel written by kernResetIntBuffer
//why isn't the particle array passed in? the start/end indices produced here are later used to slice dev_particleArrayIndices, which has been sorted by grid index
//there may be a better way to do this
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
int target_grid_index = particleGridIndices[idx];
//look at the one before and the one after, if diff, store
//head must be a start
if (idx == 0)
{
gridCellStartIndices[target_grid_index] = idx;
}
if (idx == N - 1)
{
gridCellEndIndices[target_grid_index] = idx;
return;
}
//test
//const int nextGrid = particleGridIndices[idx + 1];
//if (target_grid_index != nextGrid)
//{
// gridCellEndIndices[target_grid_index] = idx;
// gridCellStartIndices[nextGrid] = idx + 1;
//}
//original
//check one before and one after
if (idx > 0 && target_grid_index != particleGridIndices[idx - 1])
{
//start of a cell
gridCellStartIndices[target_grid_index] = idx;
}
if (idx < N - 1 && target_grid_index != particleGridIndices[idx + 1])
{
gridCellEndIndices[target_grid_index] = idx;
}
}
//very similar to photon mapping
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
glm::vec3 curr_boid_pos = pos[idx];
int idx_x = (curr_boid_pos.x - gridMin.x) * inverseCellWidth;
int idx_y = (curr_boid_pos.y - gridMin.y) * inverseCellWidth;
int idx_z = (curr_boid_pos.z - gridMin.z) * inverseCellWidth;
//combine to get the 1D index
int gridIndex = gridIndex3Dto1D(idx_x, idx_y, idx_z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
//find the cells that intersect the sphere of radius neighbor_radius = max(rule1Distance, rule2Distance, rule3Distance) -- the radius must be a const float to be used here
const float neighbor_radius = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
//incorrect here, but why?
//int max_x = glm::floor((curr_boid_pos.x + neighbor_radius - gridMin.x) * inverseCellWidth);
//max_x = max_x > gridResolution - 1 ? gridResolution - 1 : max_x;
//int min_x = glm::floor((curr_boid_pos.x - neighbor_radius - gridMin.x) * inverseCellWidth);
//min_x = min_x < 0 ? 0 : min_x;
//int max_y = glm::floor((curr_boid_pos.y + neighbor_radius - gridMin.y) * inverseCellWidth);
//max_y = max_y > gridResolution - 1 ? gridResolution - 1 : max_y;
//int min_y = glm::floor((curr_boid_pos.y + neighbor_radius - gridMin.y) * inverseCellWidth);
//min_y = min_y < 0 ? 0 : min_y;
//int max_z = glm::floor((curr_boid_pos.z + neighbor_radius - gridMin.z) * inverseCellWidth);
//max_z = max_z > gridResolution - 1 ? gridResolution - 1 : max_z;
//int min_z = glm::floor((curr_boid_pos.z + neighbor_radius - gridMin.z) * inverseCellWidth);
//min_z = min_z < 0 ? 0 : min_z;
//find the minimum index and max index of the cell we might need to use
glm::vec3 grid_index_min = glm::floor((curr_boid_pos - gridMin - glm::vec3(neighbor_radius)) * inverseCellWidth);
glm::vec3 grid_index_max = glm::floor((curr_boid_pos - gridMin + glm::vec3(neighbor_radius)) * inverseCellWidth);
int max_x = imin(gridResolution - 1, grid_index_max.x);
int max_y = imin(gridResolution - 1, grid_index_max.y);
int max_z = imin(gridResolution - 1, grid_index_max.z);
int min_x = imax(0, grid_index_min.x);
int min_y = imax(0, grid_index_min.y);
int min_z = imax(0, grid_index_min.z);
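//with cellWidth = 2 * neighbor_radius this range spans at most 2 cells per axis (8 cells total)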
// - For each cell, read the start/end indices in the boid pointer array.
glm::vec3 perceived_center(0.f, 0.f, 0.f);
glm::vec3 avoidance_velocity(0.f, 0.f, 0.f);
glm::vec3 perceived_velocity(0.f, 0.f, 0.f);
glm::vec3 new_velocity(0.f, 0.f, 0.f);
int neighbor_count_rule1 = 0;
int neighbor_count_rule3 = 0;
//is this a grid-looping optimization?
for (int x_cord_idx = min_x; x_cord_idx <= max_x; ++x_cord_idx)
{
for (int y_cord_idx = min_y; y_cord_idx <= max_y; ++y_cord_idx)
{
for (int z_cord_idx = min_z; z_cord_idx <= max_z; ++z_cord_idx)
{
int curr_gridIndex = gridIndex3Dto1D(x_cord_idx, y_cord_idx, z_cord_idx, gridResolution);
//read the start/end indices
int start = gridCellStartIndices[curr_gridIndex];
int end = gridCellEndIndices[curr_gridIndex];
if (start == -1 || end == -1) {
continue; //no boid in this cell
}
else
{
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for (int boid_array_idx = start; boid_array_idx <= end; ++boid_array_idx)
{
//boid_array_idx indexes particleArrayIndices, so look up the actual boid index -- forgetting this lookup caused a bug where only a few particles moved
int boid_idx = particleArrayIndices[boid_array_idx];
if (boid_idx == idx) continue;
//pre load temp boid pos
glm::vec3 idx_boid_pos = pos[boid_idx];
float dist = glm::distance(curr_boid_pos, idx_boid_pos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist <= rule1Distance)
{
perceived_center += idx_boid_pos;
neighbor_count_rule1++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist <= rule2Distance)
{
avoidance_velocity -= (idx_boid_pos - curr_boid_pos);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist <= rule3Distance)
{
//use boid_idx to read the neighbor's velocity, not this boid's own velocity
//perceived_velocity += vel1[idx];
perceived_velocity += vel1[boid_idx];
neighbor_count_rule3++;
}
}
}
}
}
}
//compute the new velocity
glm::vec3 rule1_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule2_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule3_component = glm::vec3(0.f, 0.f, 0.f);
if (neighbor_count_rule1 > 0)
{
rule1_component = (perceived_center / (float)neighbor_count_rule1 - curr_boid_pos) * rule1Scale;
}
rule2_component = avoidance_velocity * rule2Scale;
if (neighbor_count_rule3 > 0)
{
rule3_component = (perceived_velocity / (float)neighbor_count_rule3) * rule3Scale;
}
new_velocity += vel1[idx] + rule1_component + rule2_component + rule3_component;
// - Clamp the speed change before putting the new speed in vel2
float curr_speed = glm::length(new_velocity);
//if the total speed of vel is larger than maxSpeed, we normalize the vel and apply the maxSpeed we allow
if (curr_speed > maxSpeed)
{
new_velocity = glm::normalize(new_velocity) * maxSpeed;
}
vel2[idx] = new_velocity;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
glm::vec3 curr_boid_pos = pos[idx];
int idx_x = (curr_boid_pos.x - gridMin.x) * inverseCellWidth;
int idx_y = (curr_boid_pos.y - gridMin.y) * inverseCellWidth;
int idx_z = (curr_boid_pos.z - gridMin.z) * inverseCellWidth;
//combine to get the 1D index
int gridIndex = gridIndex3Dto1D(idx_x, idx_y, idx_z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
//by calculating those cells that interact with the sphere with the neighbor_radius(::max(::max(rule1Distance, rule2Distance), rule3Distance))
const float neighbor_radius = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
glm::vec3 grid_index_min = glm::floor((curr_boid_pos - gridMin - glm::vec3(neighbor_radius)) * inverseCellWidth);
glm::vec3 grid_index_max = glm::floor((curr_boid_pos - gridMin + glm::vec3(neighbor_radius)) * inverseCellWidth);
int max_x = imin(gridResolution - 1, grid_index_max.x);
int max_y = imin(gridResolution - 1, grid_index_max.y);
int max_z = imin(gridResolution - 1, grid_index_max.z);
int min_x = imax(0, grid_index_min.x);
int min_y = imax(0, grid_index_min.y);
int min_z = imax(0, grid_index_min.z);
// - For each cell, read the start/end indices in the boid pointer array.
glm::vec3 perceived_center(0.f, 0.f, 0.f);
glm::vec3 avoidance_velocity(0.f, 0.f, 0.f);
glm::vec3 perceived_velocity(0.f, 0.f, 0.f);
glm::vec3 new_velocity(0.f, 0.f, 0.f);
int neighbor_count_rule1 = 0;
int neighbor_count_rule3 = 0;
//is this a grid-looping optimization?
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
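//x is the innermost loop, so consecutive cell indices (and the reshuffled boid data behind them) are read in memory order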
for (int z_cord_idx = min_z; z_cord_idx <= max_z; ++z_cord_idx)
{
for (int y_cord_idx = min_y; y_cord_idx <= max_y; ++y_cord_idx)
{
for (int x_cord_idx = min_x; x_cord_idx <= max_x; ++x_cord_idx)
{
int curr_gridIndex = gridIndex3Dto1D(x_cord_idx, y_cord_idx, z_cord_idx, gridResolution);
//read the start/end indices
int start = gridCellStartIndices[curr_gridIndex];
int end = gridCellEndIndices[curr_gridIndex];
if (start == -1 || end == -1) {
continue; //no boid in this cell
}
else
{
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for (int boid_array_idx = start; boid_array_idx <= end; ++boid_array_idx)
{
//the data is already cell-coherent, so the cell range indexes pos/vel1 directly -- no extra lookup through particleArrayIndices is needed
int boid_idx = boid_array_idx;
if (boid_idx == idx) continue;
//pre load temp boid pos
glm::vec3 idx_boid_pos = pos[boid_idx];
float dist = glm::distance(idx_boid_pos, curr_boid_pos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist <= rule1Distance)
{
perceived_center += idx_boid_pos;
neighbor_count_rule1++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist <= rule2Distance)
{
avoidance_velocity -= (idx_boid_pos - curr_boid_pos);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist <= rule3Distance)
{
perceived_velocity += vel1[boid_idx];
neighbor_count_rule3++;
}
}
}
}
}
}
//compute the new velocity
glm::vec3 rule1_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule2_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule3_component = glm::vec3(0.f, 0.f, 0.f);
if (neighbor_count_rule1 > 0)
{
rule1_component = (perceived_center / (float)neighbor_count_rule1 - curr_boid_pos) * rule1Scale;
}
rule2_component = avoidance_velocity * rule2Scale;
if (neighbor_count_rule3 > 0)
{
rule3_component = (perceived_velocity / (float)neighbor_count_rule3) * rule3Scale;
}
new_velocity = vel1[idx] + rule1_component + rule2_component + rule3_component;
// - Clamp the speed change before putting the new speed in vel2
float curr_speed = glm::length(new_velocity);
//if the total speed of vel is larger than maxSpeed, we normalize the vel and apply the maxSpeed we allow
if (curr_speed > maxSpeed)
{
new_velocity = glm::normalize(new_velocity) * maxSpeed;
}
vel2[idx] = new_velocity;
}
__global__ void kernReshuffleArray(int N, int *particle_array_indices, glm::vec3 *source, glm::vec3 *destination)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
int particle_idx = particle_array_indices[idx];
destination[idx] = source[particle_idx];
}
__global__ void kernReverseArray(int N, int *particle_array_indices, glm::vec3 *source, glm::vec3 *destination)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
int particle_idx = particle_array_indices[idx];
destination[particle_idx] = source[idx];
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
int gridSize = (numObjects + blockSize - 1) / blockSize; //helped by Gangzheng Tong
dim3 blocksPerGrid(gridSize);
//first compute the new velocity
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
//Then update the pos
kernUpdatePos << < blocksPerGrid, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
// TODO-1.2 ping-pong the velocity buffers -- swap content
glm::vec3 *temp_vel = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp_vel;
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
//set up blocks
int gridSizeParticle = (numObjects + blockSize - 1) / blockSize; //helped by Gangzheng Tong
dim3 blocksPerGridParticle(gridSizeParticle);
int gridSizeGridCell = (gridCellCount + blockSize - 1) / blockSize;
dim3 blocksPerGridGridCell(gridSizeGridCell);
//call kernel
kernComputeIndices << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices); //sort by grid index
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
//first initialize the two indices list to be -1
hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(blocksPerGridGridCell), dim3(threadsPerBlock) , 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
hipLaunchKernelGGL(( kernResetIntBuffer) , dim3(blocksPerGridGridCell), dim3(threadsPerBlock) , 0, 0, gridCellCount, dev_gridCellEndIndices, -1);
//then call the kernel to compute those who contain boids
hipLaunchKernelGGL(( kernIdentifyCellStartEnd) , dim3(blocksPerGridParticle), dim3(threadsPerBlock) , 0, 0, numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchScattered << < blocksPerGridParticle, threadsPerBlock >> > (
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
// - Update positions
kernUpdatePos << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed
glm::vec3 *temp_vel = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp_vel;
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
int gridSizeParticle = (numObjects + blockSize - 1) / blockSize; //helped by Gangzheng Tong
dim3 blocksPerGridParticle(gridSizeParticle);
int gridSizeGridCell = (gridCellCount + blockSize - 1) / blockSize;
dim3 blocksPerGridGridCell(gridSizeGridCell);
kernComputeIndices << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices); //sort by grid index
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
//first initialize the two indices list to be -1
kernResetIntBuffer << < blocksPerGridGridCell, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << < blocksPerGridGridCell, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1);
kernIdentifyCellStartEnd << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
kernReshuffleArray << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_pos_buffer);
kernReshuffleArray << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_vel1, dev_vel1_buffer);
//reassign the pos and vel buffer back to pos and vel1
hipMemcpy(dev_pos, dev_pos_buffer, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
hipMemcpy(dev_vel1, dev_vel1_buffer, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
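//note: swapping the dev_pos/dev_pos_buffer and dev_vel1/dev_vel1_buffer pointers would avoid these two device-to-device copies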
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchCoherent << < blocksPerGridParticle, threadsPerBlock >> > (
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos, dev_vel1, dev_vel2);
//dev_pos and dev_vel1 have been reshuffled into cell order, and dev_vel2 is written by the kernel in that same order
// - Update positions
kernUpdatePos << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. -- shuffle back and store back
glm::vec3 *temp_vel = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp_vel;
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
//2.1 free
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(dev_vel1_buffer);
hipFree(dev_vel2_buffer);
hipFree(dev_pos_buffer);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
} | 90cd4a05cf96378ae7cc7ed223a53efee17d7462.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define CELL_WIDTH_FACTOR 2.0f
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 200
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos_buffer;
glm::vec3 *dev_vel1_buffer;
glm::vec3 *dev_vel2_buffer;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
//initialize numObjects here
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = CELL_WIDTH_FACTOR * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
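// cell width is twice the largest rule distance, so a boid's neighborhood sphere overlaps at most 2 cells per axis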
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
//these two arrays only use to store cell information, so no need to be as many as numObjects
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
//2.3 additional buffer to help reshuffle
cudaMalloc((void**)&dev_pos_buffer, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos_buffer failed!");
cudaMalloc((void**)&dev_vel1_buffer, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1_buffer failed!");
cudaMalloc((void**)&dev_vel2_buffer, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2_buffer failed!");
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
glm::vec3 perceived_center(0.f,0.f,0.f);
glm::vec3 avoidance_velocity(0.f, 0.f, 0.f);
glm::vec3 perceived_velocity(0.f, 0.f, 0.f);
glm::vec3 return_vel(0.f, 0.f, 0.f);
int neighbor_count_rule1 = 0;
int neighbor_count_rule3 = 0;
//pre load all needed data
glm::vec3 curr_boid_pos = pos[iSelf];
for (int idx = 0; idx < N; ++idx)
{
//skip the boid itself
if (idx == iSelf) continue;
//load current boid pos
glm::vec3 idx_boid_pos = pos[idx];
float dist = glm::distance(idx_boid_pos, curr_boid_pos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist <= rule1Distance)
{
perceived_center += idx_boid_pos;
neighbor_count_rule1++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist <= rule2Distance)
{
avoidance_velocity -= (idx_boid_pos - curr_boid_pos);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist <= rule3Distance)
{
perceived_velocity += vel[idx];
neighbor_count_rule3++;
}
}
//divide by the neighbor count, not N-1: dividing by N-1 makes the particles shrink toward the center of the cube -- helped by Hannar
glm::vec3 rule1_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule2_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule3_component = glm::vec3(0.f, 0.f, 0.f);
if (neighbor_count_rule1 > 0)
{
rule1_component = (perceived_center / (float)neighbor_count_rule1 - curr_boid_pos) * rule1Scale;
}
rule2_component = avoidance_velocity * rule2Scale;
if (neighbor_count_rule3 > 0)
{
rule3_component = (perceived_velocity / (float)neighbor_count_rule3) * rule3Scale;
}
//sum the current velocity and all three rule components -- helped by Hanna's README rules section
return_vel += vel[iSelf] + rule1_component + rule2_component + rule3_component;
return return_vel;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
//Compute the index of current thread
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N)
{
return;
}
glm::vec3 new_velocity = computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
float curr_speed = glm::length(new_velocity);
//if the total speed of vel is larger than maxSpeed, we normalize the vel and apply the maxSpeed we allow -- do we need to care negative speed?
if (curr_speed > maxSpeed)
{
new_velocity = glm::normalize(new_velocity) * maxSpeed;
}
// Record the new velocity into vel2. Question: why NOT vel1? --- because other boids might need that
vel2[index] = new_velocity;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x) tick
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
//compute the cell index along each axis and combine them with gridIndex3Dto1D into a 1D index -- inverseCellWidth is used because a multiply is cheaper than a divide
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
//pre store info
glm::vec3 curr_pos = pos[idx];
int idx_x = (curr_pos.x - gridMin.x) * inverseCellWidth;
int idx_y = (curr_pos.y - gridMin.y) * inverseCellWidth;
int idx_z = (curr_pos.z - gridMin.z) * inverseCellWidth;
//combine to get the 1D index
int gridIndex = gridIndex3Dto1D(idx_x, idx_y, idx_z, gridResolution);
//store to indices and gridIndices correspondingly
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[idx] = idx;
gridIndices[idx] = gridIndex;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
//what happens for cells that contain no particles? they keep the -1 sentinel written by kernResetIntBuffer
//why isn't the particle array passed in? the start/end indices produced here are later used to slice dev_particleArrayIndices, which has been sorted by grid index
//there may be a better way to do this
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
int target_grid_index = particleGridIndices[idx];
//look at the one before and the one after, if diff, store
//head must be a start
if (idx == 0)
{
gridCellStartIndices[target_grid_index] = idx;
}
if (idx == N - 1)
{
gridCellEndIndices[target_grid_index] = idx;
return;
}
//test
//const int nextGrid = particleGridIndices[idx + 1];
//if (target_grid_index != nextGrid)
//{
// gridCellEndIndices[target_grid_index] = idx;
// gridCellStartIndices[nextGrid] = idx + 1;
//}
//original
//check one before and one after
if (idx > 0 && target_grid_index != particleGridIndices[idx - 1])
{
//start of a cell
gridCellStartIndices[target_grid_index] = idx;
}
if (idx < N - 1 && target_grid_index != particleGridIndices[idx + 1])
{
gridCellEndIndices[target_grid_index] = idx;
}
}
//very similar to photon mapping
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
glm::vec3 curr_boid_pos = pos[idx];
int idx_x = (curr_boid_pos.x - gridMin.x) * inverseCellWidth;
int idx_y = (curr_boid_pos.y - gridMin.y) * inverseCellWidth;
int idx_z = (curr_boid_pos.z - gridMin.z) * inverseCellWidth;
//combine to get the 1D index
int gridIndex = gridIndex3Dto1D(idx_x, idx_y, idx_z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
//find the cells that intersect the sphere of radius neighbor_radius = std::max(rule1Distance, rule2Distance, rule3Distance) -- the radius must be a const float to be used here
const float neighbor_radius = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
//incorrect here, but why?
//int max_x = glm::floor((curr_boid_pos.x + neighbor_radius - gridMin.x) * inverseCellWidth);
//max_x = max_x > gridResolution - 1 ? gridResolution - 1 : max_x;
//int min_x = glm::floor((curr_boid_pos.x - neighbor_radius - gridMin.x) * inverseCellWidth);
//min_x = min_x < 0 ? 0 : min_x;
//int max_y = glm::floor((curr_boid_pos.y + neighbor_radius - gridMin.y) * inverseCellWidth);
//max_y = max_y > gridResolution - 1 ? gridResolution - 1 : max_y;
//int min_y = glm::floor((curr_boid_pos.y + neighbor_radius - gridMin.y) * inverseCellWidth);
//min_y = min_y < 0 ? 0 : min_y;
//int max_z = glm::floor((curr_boid_pos.z + neighbor_radius - gridMin.z) * inverseCellWidth);
//max_z = max_z > gridResolution - 1 ? gridResolution - 1 : max_z;
//int min_z = glm::floor((curr_boid_pos.z + neighbor_radius - gridMin.z) * inverseCellWidth);
//min_z = min_z < 0 ? 0 : min_z;
//find the minimum index and max index of the cell we might need to use
glm::vec3 grid_index_min = glm::floor((curr_boid_pos - gridMin - glm::vec3(neighbor_radius)) * inverseCellWidth);
glm::vec3 grid_index_max = glm::floor((curr_boid_pos - gridMin + glm::vec3(neighbor_radius)) * inverseCellWidth);
int max_x = imin(gridResolution - 1, grid_index_max.x);
int max_y = imin(gridResolution - 1, grid_index_max.y);
int max_z = imin(gridResolution - 1, grid_index_max.z);
int min_x = imax(0, grid_index_min.x);
int min_y = imax(0, grid_index_min.y);
int min_z = imax(0, grid_index_min.z);
// - For each cell, read the start/end indices in the boid pointer array.
glm::vec3 perceived_center(0.f, 0.f, 0.f);
glm::vec3 avoidance_velocity(0.f, 0.f, 0.f);
glm::vec3 perceived_velocity(0.f, 0.f, 0.f);
glm::vec3 new_velocity(0.f, 0.f, 0.f);
int neighbor_count_rule1 = 0;
int neighbor_count_rule3 = 0;
//is this a grid-looping optimization?
for (int x_cord_idx = min_x; x_cord_idx <= max_x; ++x_cord_idx)
{
for (int y_cord_idx = min_y; y_cord_idx <= max_y; ++y_cord_idx)
{
for (int z_cord_idx = min_z; z_cord_idx <= max_z; ++z_cord_idx)
{
int curr_gridIndex = gridIndex3Dto1D(x_cord_idx, y_cord_idx, z_cord_idx, gridResolution);
//read the start/end indices
int start = gridCellStartIndices[curr_gridIndex];
int end = gridCellEndIndices[curr_gridIndex];
if (start == -1 || end == -1) {
continue; //no boid in this cell
}
else
{
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for (int boid_array_idx = start; boid_array_idx <= end; ++boid_array_idx)
{
//boid_array_idx indexes particleArrayIndices, so look up the actual boid index -- forgetting this lookup caused a bug where only a few particles moved
int boid_idx = particleArrayIndices[boid_array_idx];
if (boid_idx == idx) continue;
//pre load temp boid pos
glm::vec3 idx_boid_pos = pos[boid_idx];
float dist = glm::distance(curr_boid_pos, idx_boid_pos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist <= rule1Distance)
{
perceived_center += idx_boid_pos;
neighbor_count_rule1++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist <= rule2Distance)
{
avoidance_velocity -= (idx_boid_pos - curr_boid_pos);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist <= rule3Distance)
{
//use boid_idx to read the neighbor's velocity, not this boid's own velocity
//perceived_velocity += vel1[idx];
perceived_velocity += vel1[boid_idx];
neighbor_count_rule3++;
}
}
}
}
}
}
//compute the new velocity
glm::vec3 rule1_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule2_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule3_component = glm::vec3(0.f, 0.f, 0.f);
if (neighbor_count_rule1 > 0)
{
rule1_component = (perceived_center / (float)neighbor_count_rule1 - curr_boid_pos) * rule1Scale;
}
rule2_component = avoidance_velocity * rule2Scale;
if (neighbor_count_rule3 > 0)
{
rule3_component = (perceived_velocity / (float)neighbor_count_rule3) * rule3Scale;
}
new_velocity += vel1[idx] + rule1_component + rule2_component + rule3_component;
// - Clamp the speed change before putting the new speed in vel2
float curr_speed = glm::length(new_velocity);
//if the total speed of vel is larger than maxSpeed, we normalize the vel and apply the maxSpeed we allow
if (curr_speed > maxSpeed)
{
new_velocity = glm::normalize(new_velocity) * maxSpeed;
}
vel2[idx] = new_velocity;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
glm::vec3 curr_boid_pos = pos[idx];
int idx_x = (curr_boid_pos.x - gridMin.x) * inverseCellWidth;
int idx_y = (curr_boid_pos.y - gridMin.y) * inverseCellWidth;
int idx_z = (curr_boid_pos.z - gridMin.z) * inverseCellWidth;
//combine to get the 1D index
int gridIndex = gridIndex3Dto1D(idx_x, idx_y, idx_z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
//by calculating those cells that interact with the sphere with the neighbor_radius(std::max(std::max(rule1Distance, rule2Distance), rule3Distance))
const float neighbor_radius = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
glm::vec3 grid_index_min = glm::floor((curr_boid_pos - gridMin - glm::vec3(neighbor_radius)) * inverseCellWidth);
glm::vec3 grid_index_max = glm::floor((curr_boid_pos - gridMin + glm::vec3(neighbor_radius)) * inverseCellWidth);
int max_x = imin(gridResolution - 1, grid_index_max.x);
int max_y = imin(gridResolution - 1, grid_index_max.y);
int max_z = imin(gridResolution - 1, grid_index_max.z);
int min_x = imax(0, grid_index_min.x);
int min_y = imax(0, grid_index_min.y);
int min_z = imax(0, grid_index_min.z);
// - For each cell, read the start/end indices in the boid pointer array.
glm::vec3 perceived_center(0.f, 0.f, 0.f);
glm::vec3 avoidance_velocity(0.f, 0.f, 0.f);
glm::vec3 perceived_velocity(0.f, 0.f, 0.f);
glm::vec3 new_velocity(0.f, 0.f, 0.f);
int neighbor_count_rule1 = 0;
int neighbor_count_rule3 = 0;
//is this a grid-looping optimization?
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
for (int z_cord_idx = min_z; z_cord_idx <= max_z; ++z_cord_idx)
{
for (int y_cord_idx = min_y; y_cord_idx <= max_y; ++y_cord_idx)
{
for (int x_cord_idx = min_x; x_cord_idx <= max_x; ++x_cord_idx)
{
int curr_gridIndex = gridIndex3Dto1D(x_cord_idx, y_cord_idx, z_cord_idx, gridResolution);
//read the start/end indices
int start = gridCellStartIndices[curr_gridIndex];
int end = gridCellEndIndices[curr_gridIndex];
if (start == -1 || end == -1) {
continue; //no boid in this cell
}
else
{
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for (int boid_array_idx = start; boid_array_idx <= end; ++boid_array_idx)
{
//the data is already cell-coherent, so the cell range indexes pos/vel1 directly -- no extra lookup through particleArrayIndices is needed
int boid_idx = boid_array_idx;
if (boid_idx == idx) continue;
//pre load temp boid pos
glm::vec3 idx_boid_pos = pos[boid_idx];
float dist = glm::distance(idx_boid_pos, curr_boid_pos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist <= rule1Distance)
{
perceived_center += idx_boid_pos;
neighbor_count_rule1++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist <= rule2Distance)
{
avoidance_velocity -= (idx_boid_pos - curr_boid_pos);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist <= rule3Distance)
{
perceived_velocity += vel1[boid_idx];
neighbor_count_rule3++;
}
}
}
}
}
}
//compute the new velocity
glm::vec3 rule1_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule2_component = glm::vec3(0.f, 0.f, 0.f);
glm::vec3 rule3_component = glm::vec3(0.f, 0.f, 0.f);
if (neighbor_count_rule1 > 0)
{
rule1_component = (perceived_center / (float)neighbor_count_rule1 - curr_boid_pos) * rule1Scale;
}
rule2_component = avoidance_velocity * rule2Scale;
if (neighbor_count_rule3 > 0)
{
rule3_component = (perceived_velocity / (float)neighbor_count_rule3) * rule3Scale;
}
new_velocity = vel1[idx] + rule1_component + rule2_component + rule3_component;
// - Clamp the speed change before putting the new speed in vel2
float curr_speed = glm::length(new_velocity);
//if the total speed of vel is larger than maxSpeed, we normalize the vel and apply the maxSpeed we allow
if (curr_speed > maxSpeed)
{
new_velocity = glm::normalize(new_velocity) * maxSpeed;
}
vel2[idx] = new_velocity;
}
__global__ void kernReshuffleArray(int N, int *particle_array_indices, glm::vec3 *source, glm::vec3 *destination)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
int particle_idx = particle_array_indices[idx];
destination[idx] = source[particle_idx];
}
__global__ void kernReverseArray(int N, int *particle_array_indices, glm::vec3 *source, glm::vec3 *destination)
{
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx >= N) {
return;
}
int particle_idx = particle_array_indices[idx];
destination[particle_idx] = source[idx];
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
int gridSize = (numObjects + blockSize - 1) / blockSize; //helped by Gangzheng Tong
dim3 blocksPerGrid(gridSize);
//first compute the new velocity
kernUpdateVelocityBruteForce <<< blocksPerGrid, threadsPerBlock >>> (numObjects, dev_pos, dev_vel1, dev_vel2);
//Then update the pos
kernUpdatePos << < blocksPerGrid, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
// TODO-1.2 ping-pong the velocity buffers -- swap content
glm::vec3 *temp_vel = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp_vel;
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
//set up blocks
int gridSizeParticle = (numObjects + blockSize - 1) / blockSize; //helped by Gangzheng Tong
dim3 blocksPerGridParticle(gridSizeParticle);
int gridSizeGridCell = (gridCellCount + blockSize - 1) / blockSize;
dim3 blocksPerGridGridCell(gridSizeGridCell);
//call kernel
kernComputeIndices << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices); //sort by grid index
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
//first initialize the two indices list to be -1
kernResetIntBuffer <<< blocksPerGridGridCell, threadsPerBlock >>> (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer <<< blocksPerGridGridCell, threadsPerBlock >>> (gridCellCount, dev_gridCellEndIndices, -1);
//then call the kernel to compute those who contain boids
kernIdentifyCellStartEnd <<< blocksPerGridParticle, threadsPerBlock >>> (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchScattered << < blocksPerGridParticle, threadsPerBlock >> > (
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
// - Update positions
kernUpdatePos << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed
glm::vec3 *temp_vel = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp_vel;
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
int gridSizeParticle = (numObjects + blockSize - 1) / blockSize; //helped by Gangzheng Tong
dim3 blocksPerGridParticle(gridSizeParticle);
int gridSizeGridCell = (gridCellCount + blockSize - 1) / blockSize;
dim3 blocksPerGridGridCell(gridSizeGridCell);
kernComputeIndices << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices); //sort by grid index
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
//first initialize the two indices list to be -1
kernResetIntBuffer << < blocksPerGridGridCell, threadsPerBlock >> > (gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << < blocksPerGridGridCell, threadsPerBlock >> > (gridCellCount, dev_gridCellEndIndices, -1);
kernIdentifyCellStartEnd << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
kernReshuffleArray << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_pos_buffer);
kernReshuffleArray << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dev_particleArrayIndices, dev_vel1, dev_vel1_buffer);
//reassign the pos and vel buffer back to pos and vel1
cudaMemcpy(dev_pos, dev_pos_buffer, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vel1, dev_vel1_buffer, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchCoherent << < blocksPerGridParticle, threadsPerBlock >> > (
numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos, dev_vel1, dev_vel2);
//dev_pos and dev_vel1 have been reshuffled into cell order, and dev_vel2 is written by the kernel in that same order
// - Update positions
kernUpdatePos << < blocksPerGridParticle, threadsPerBlock >> > (numObjects, dt, dev_pos, dev_vel2);
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. -- shuffle back and store back
glm::vec3 *temp_vel = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp_vel;
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
//2.1 free
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(dev_vel1_buffer);
cudaFree(dev_vel2_buffer);
cudaFree(dev_pos_buffer);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
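// With the keys above, the sorted keys come back as 0 0 0 0 1 2 2 3 5 6; because the sort is
// unstable, the values that share a key (e.g. the four 0-keys) may appear in any relative order.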
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
} |
a648b465fd37fad44bff469021ef6f7a45f0a046.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file normalize_gpu.c
// @brief Normalize block implementation (GPU)
// @author Andrea Vedaldi
/*
Copyright (C) 2014-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "normalize.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <float.h>
/* ---------------------------------------------------------------- */
/* normalize_forward_kernel */
/* ---------------------------------------------------------------- */
#undef xat
#undef yat
#undef zat
#define xat(t) x[(t) * offset]
#define yat(t) y[(t) * offset]
#define zat(t) z[(t) * offset]
#define __powf powf
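/*
 Forward LRN across channels: each thread owns one (u, v, image) column and slides a window of
 normDepth channels along the depth dimension, maintaining acc = sum of x(q)^2 over the window.
 The output is y(t) = x(t) * (kappa + alpha * acc)^(-beta).
*/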
template<typename T> __global__ void
normalize_forward_kernel
(T* output,
T const* data,
int width,
int height,
int depth,
int num,
int normDepth,
T kappa, T alpha, T beta)
{
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < width*height*num) {
int u0 = index ;
int v0 = u0 / width ;
int k0 = v0 / height ;
u0 %= width ;
v0 %= height ;
int m1 = ((signed)normDepth-1)/2 ;
int m2 = normDepth - m1 - 1 ;
int offset = width*height ;
int t ;
T const* x = data + u0 + (v0 + k0 * (depth*height)) * width ;
T* y = output + u0 + (v0 + k0 * (depth*height)) * width ;
T acc = 0 ;
for (t = -m2 ; t < (signed)depth ; ++t) {
T ap = 0 ;
T am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; }
if (t+m2 < depth) { ap = xat(t+m2) ; }
acc += ap*ap - am*am ;
if (0 <= t && t < depth) {
yat(t) = xat(t) * __powf(kappa + alpha * acc, -beta) ;
}
}
}
}
/* ---------------------------------------------------------------- */
/* normalize_backward_kernel */
/* ---------------------------------------------------------------- */
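/*
 Backward pass: with L(t) = kappa + alpha * sum_q x(q)^2 over the window around t, the forward
 output is y(t) = x(t) * L(t)^(-beta), so
   d y(t) / d x(q) = [t == q] * L(t)^(-beta) - 2*alpha*beta * x(t) * x(q) * L(t)^(-beta-1).
 The loop below accumulates dzdy(t) times these two terms into the output gradient
 (ab2 = 2*alpha*beta, Lbeta1 = L^(-beta-1)).
*/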
template<typename T> __global__ void
normalize_backward_kernel
(T* output,
T const* data,
T const* dzdy,
int width,
int height,
int depth,
int num,
int normDepth,
T kappa, T alpha, T beta)
{
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < width*height*num) {
int u0 = index ;
int v0 = u0 / width ;
int k0 = v0 / height ;
u0 %= width ;
v0 %= height ;
int m1 = ((signed)normDepth-1)/2 ;
int m2 = normDepth - m1 - 1 ;
int offset = width*height ;
T ab2 = 2*alpha*beta ;
int t, q ;
T const* x = data + u0 + (v0 + k0 * (depth*height)) * width ;
T* y = output + u0 + (v0 + k0 * (depth*height)) * width ;
T const* z = dzdy + u0 + (v0 + k0 * (depth*height)) * width ;
T acc = 0 ;
for (t = 0 ; t < (signed)depth ; ++t) {
yat(t) = 0 ;
}
for (t = -m2 ; t < (signed)depth ; ++t) {
int q1 = t-m1 ;
int q2 = t+m2 ;
T ap = 0 ;
T am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; } else { q1 = 0 ; }
if (t+m2 < depth) { ap = xat(t+m2) ; } else { q2 = depth - 1 ; }
acc += ap*ap - am*am ;
T L = kappa + alpha * acc ;
T Lbeta = __powf(L, -beta) ;
T Lbeta1 = Lbeta / L ;
if (0 <= t && t < depth) {
yat(t) += zat(t) * Lbeta ;
for (q = q1 ; q <= q2 ; ++ q) {
yat(q) -= zat(t) * xat(t) * xat(q) * ab2 * Lbeta1 ;
}
}
}
}
}
/* ---------------------------------------------------------------- */
/* drivers */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template<typename type>
struct lrn<vl::GPU, type>
{
/* ------------------------------------------------------------ */
/* forward */
/* ------------------------------------------------------------ */
static vl::Error
forward(type * output,
type const* data,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t normDepth,
type kappa, type alpha, type beta)
{
hipLaunchKernelGGL(( normalize_forward_kernel<type >)
, dim3(divideUpwards(width*height*size, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
output, data, width, height, depth, size, normDepth, kappa, alpha, beta) ;
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
/* ------------------------------------------------------------ */
/* backward */
/* ------------------------------------------------------------ */
static vl::Error
backward(type * derData,
type const* data,
type const* derOutput,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t normDepth,
type kappa, type alpha, type beta)
{
hipLaunchKernelGGL(( normalize_backward_kernel<type >)
, dim3(divideUpwards(width*height*size, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derData, data, derOutput, width, height, depth, size, normDepth, kappa, alpha, beta) ;
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ;
} }
// Instantiations
template struct vl::impl::lrn<vl::GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::lrn<vl::GPU, double> ;
#endif
| a648b465fd37fad44bff469021ef6f7a45f0a046.cu | // @file normalize_gpu.c
// @brief Normalize block implementation (GPU)
// @author Andrea Vedaldi
/*
Copyright (C) 2014-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "normalize.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <float.h>
/* ---------------------------------------------------------------- */
/* normalize_forward_kernel */
/* ---------------------------------------------------------------- */
#undef xat
#undef yat
#undef zat
#define xat(t) x[(t) * offset]
#define yat(t) y[(t) * offset]
#define zat(t) z[(t) * offset]
#define __powf powf
template<typename T> __global__ void
normalize_forward_kernel
(T* output,
T const* data,
int width,
int height,
int depth,
int num,
int normDepth,
T kappa, T alpha, T beta)
{
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < width*height*num) {
int u0 = index ;
int v0 = u0 / width ;
int k0 = v0 / height ;
u0 %= width ;
v0 %= height ;
int m1 = ((signed)normDepth-1)/2 ;
int m2 = normDepth - m1 - 1 ;
int offset = width*height ;
int t ;
T const* x = data + u0 + (v0 + k0 * (depth*height)) * width ;
T* y = output + u0 + (v0 + k0 * (depth*height)) * width ;
T acc = 0 ;
for (t = -m2 ; t < (signed)depth ; ++t) {
T ap = 0 ;
T am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; }
if (t+m2 < depth) { ap = xat(t+m2) ; }
acc += ap*ap - am*am ;
if (0 <= t && t < depth) {
yat(t) = xat(t) * __powf(kappa + alpha * acc, -beta) ;
}
}
}
}
/* ---------------------------------------------------------------- */
/* normalize_backward_kernel */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
normalize_backward_kernel
(T* output,
T const* data,
T const* dzdy,
int width,
int height,
int depth,
int num,
int normDepth,
T kappa, T alpha, T beta)
{
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < width*height*num) {
int u0 = index ;
int v0 = u0 / width ;
int k0 = v0 / height ;
u0 %= width ;
v0 %= height ;
int m1 = ((signed)normDepth-1)/2 ;
int m2 = normDepth - m1 - 1 ;
int offset = width*height ;
T ab2 = 2*alpha*beta ;
int t, q ;
T const* x = data + u0 + (v0 + k0 * (depth*height)) * width ;
T* y = output + u0 + (v0 + k0 * (depth*height)) * width ;
T const* z = dzdy + u0 + (v0 + k0 * (depth*height)) * width ;
T acc = 0 ;
for (t = 0 ; t < (signed)depth ; ++t) {
yat(t) = 0 ;
}
for (t = -m2 ; t < (signed)depth ; ++t) {
int q1 = t-m1 ;
int q2 = t+m2 ;
T ap = 0 ;
T am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; } else { q1 = 0 ; }
if (t+m2 < depth) { ap = xat(t+m2) ; } else { q2 = depth - 1 ; }
acc += ap*ap - am*am ;
T L = kappa + alpha * acc ;
T Lbeta = __powf(L, -beta) ;
T Lbeta1 = Lbeta / L ;
if (0 <= t && t < depth) {
yat(t) += zat(t) * Lbeta ;
for (q = q1 ; q <= q2 ; ++ q) {
yat(q) -= zat(t) * xat(t) * xat(q) * ab2 * Lbeta1 ;
}
}
}
}
}
/* ---------------------------------------------------------------- */
/* drivers */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template<typename type>
struct lrn<vl::GPU, type>
{
/* ------------------------------------------------------------ */
/* forward */
/* ------------------------------------------------------------ */
static vl::Error
forward(type * output,
type const* data,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t normDepth,
type kappa, type alpha, type beta)
{
normalize_forward_kernel<type >
<<< divideUpwards(width*height*size, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(output, data, width, height, depth, size, normDepth, kappa, alpha, beta) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
/* ------------------------------------------------------------ */
/* backward */
/* ------------------------------------------------------------ */
static vl::Error
backward(type * derData,
type const* data,
type const* derOutput,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t normDepth,
type kappa, type alpha, type beta)
{
normalize_backward_kernel<type >
<<< divideUpwards(width*height*size, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data, derOutput, width, height, depth, size, normDepth, kappa, alpha, beta) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
} ;
} }
// Instantiations
template struct vl::impl::lrn<vl::GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::lrn<vl::GPU, double> ;
#endif
|
751891f22712ba8ee13ad7ba21db4167dfb68fda.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_ptr.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
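// Seeding the engine with a hash of (iteration, pixel index, depth) gives each pixel, bounce,
// and iteration its own decorrelated random sequence.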
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam
, int iter
, int traceDepth
, PathSegment * pathSegments
)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
segment.accumColor = glm::vec3(0.f);
segment.throughput = segment.color;
segment.hitSpecularObject = false;
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, index);
thrust::uniform_real_distribution<float> u_onehalf(-0.5f, 0.5f);
float xJitter = u_onehalf(rng) * cam.pixelLength.x;
float yJitter = u_onehalf(rng) * cam.pixelLength.y;
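// The sub-pixel jitter above (uniform in [-0.5, 0.5] pixel widths) provides stochastic
// antialiasing: averaging many iterations approximates integrating radiance over the pixel area.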
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
+ glm::vec3(xJitter, yJitter, 0.f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
__device__ float SDF(
glm::vec3 pos
, enum ImplicitSurfaceType type
, float scale
)
{
switch (type)
{
case NONE:
return 0.f; // degenerate case
case SPHERE_IMPLICIT:
return min(length(pos) - scale, length(pos - glm::vec3(0.f, 0.5f, 0.f)) - scale * 0.5f);
case MANDELBULB:
// This SDF for the Mandelbulb belongs to Inigo Quilez: https://www.shadertoy.com/view/ltfSWn
const float power = 8.f;
pos /= scale;
glm::vec3 w = pos;
float m = dot(w, w);
glm::vec4 trap = glm::vec4(abs(w), m);
float dz = 1.0;
for (int i = 0; i < 4; i++)
{
#if 1
float m2 = m*m;
float m4 = m2*m2;
dz = power*sqrt(m4*m2*m)*dz + 1.0;
float x = w.x; float x2 = x*x; float x4 = x2*x2;
float y = w.y; float y2 = y*y; float y4 = y2*y2;
float z = w.z; float z2 = z*z; float z4 = z2*z2;
float k3 = x2 + z2;
float k2 = glm::inversesqrt(k3*k3*k3*k3*k3*k3*k3);
float k1 = x4 + y4 + z4 - 6.0*y2*z2 - 6.0*x2*y2 + 2.0*z2*x2;
float k4 = x2 - y2 + z2;
w.x = pos.x + 64.0*x*y*z*(x2 - z2)*k4*(x4 - 6.0*x2*z2 + z4)*k1*k2;
w.y = pos.y + -16.0*y2*k3*k4*k4 + k1*k1;
w.z = pos.z + -8.0*y*k4*(x4*x4 - 28.0*x4*x2*z2 + 70.0*x4*z4 - 28.0*x2*z2*z4 + z4*z4)*k1*k2;
#else
dz = 8.0*pow(m, 3.5)*dz + 1.0;
float r = glm::length(w);
float b = 8.0*acos(glm::clamp(w.y / r, -1.f, 1.f));
float a = 8.0*atan2(w.x, w.z);
w = pos + powf(r, 8.0f) * glm::vec3(sin(b)*sin(a), cos(b), sin(b)*cos(a));
#endif
trap = min(trap, glm::vec4(abs(w), m));
m = dot(w, w);
if (m > 4.0)
break;
}
trap.x = m;
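// Standard escape-time distance estimate: with r = |w| = sqrt(m), this equals 0.5 * r * log(r) / dz.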
return 0.25*log(m)*sqrt(m) / dz;
}
return 0.f;
}
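// Approximates the surface normal as the normalized gradient of the SDF, using central
// differences with step EPSILON along each axis.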
__device__ glm::vec3 ComputeNormal(
glm::vec3 pos
, float currDist
, enum ImplicitSurfaceType type
, float scale
)
{
return glm::normalize(glm::vec3(SDF(pos + glm::vec3(1.f * EPSILON, 0.f, 0.f), type, scale) - SDF(pos - glm::vec3(1.f * EPSILON, 0.f, 0.f), type, scale),
SDF(pos + glm::vec3(0.f, 1.f * EPSILON, 0.f), type, scale) - SDF(pos - glm::vec3(0.f, 1.f * EPSILON, 0.f), type, scale),
SDF(pos + glm::vec3(0.f, 0.f, 1.f * EPSILON), type, scale) - SDF(pos - glm::vec3(0.f, 0.f, 1.f * EPSILON), type, scale)));
}
__device__ void ComputeIntersectionsHelper(
PathSegment & pathSegment
, Geom* geoms
, int geoms_size
, ShadeableIntersection & currentIsect
, int ignoreIndex
)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
if (i != ignoreIndex)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == PLANE)
{
t = planeIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == IMPLICITBOUNDINGVOLUME)
{
// Compute intersection w/ the bounding sphere, mark this path segment as currently being in contact with an
// implicit surface. run a raymarching kernel for all path segments that says
// if this path segment is the right index && is hitting an implicit surface, while t < tmax, march
// - also need to compute the max t value for the marching limit
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
}
if (hit_geom_index == -1)
{
currentIsect.t = -1.0f;
}
else
{
Geom & hitGeom = geoms[hit_geom_index];
//The ray hits something
currentIsect.t = t_min;
currentIsect.materialId = geoms[hit_geom_index].materialid;
currentIsect.surfaceNormal = normal;
if (hitGeom.type == IMPLICITBOUNDINGVOLUME)
{
if (outside) // do the raymarch below
{
bool newOutside;
Ray newRay = pathSegment.ray;
newRay.origin = tmp_intersect + 0.01f * newRay.direction;
glm::vec3 isectPos, isectNormal;
float tMax = sphereIntersectionTest(hitGeom, newRay, isectPos, isectNormal, newOutside);
tMax += t_min;
// Now raymarch
const int numSteps = 2000;
int i = 0;
float rayT = t_min;
const float scale = 5.f;
float distance;
glm::vec3 currentPos;
while (rayT <= tMax && i < numSteps)
{
currentPos = pathSegment.ray.origin + rayT * pathSegment.ray.direction;
distance = SDF(currentPos - hitGeom.translation, hitGeom.implicitType, scale);
if (distance < 0.001f) // very close to the surface
{
currentIsect.t = rayT;
currentIsect.surfaceNormal = ComputeNormal(currentPos - hitGeom.translation
, distance
, hitGeom.implicitType
, scale); // compute implicit normals using gradient method
// Material ID is already set for this intersection
return;
}
rayT += distance;
i++;
}
// If we don't march into anything, intersect with the rest of the scene
ComputeIntersectionsHelper(pathSegment, geoms, geoms_size, currentIsect, hit_geom_index); // ignore this bounding volume
}
else // Already inside, so compute the tMax, march to it, if we don't hit the implicit surface, then compute intersection with the rest of the scene
{
bool newOutside;
Ray newRay = pathSegment.ray;
newRay.origin = tmp_intersect;
glm::vec3 isectPos, isectNormal;
float tMax = currentIsect.t;
// Now raymarch
const int numSteps = 2000;
int i = 0;
float rayT = 0.0001f;
const float scale = 5.f;
float distance;
glm::vec3 currentPos;
while (rayT <= tMax && i < numSteps)
{
currentPos = pathSegment.ray.origin + rayT * pathSegment.ray.direction;
distance = SDF(currentPos - hitGeom.translation, hitGeom.implicitType, scale);
if (distance < 0.001f) // very close to the surface
{
currentIsect.t = rayT;
currentIsect.surfaceNormal = ComputeNormal(currentPos - hitGeom.translation
, distance
, hitGeom.implicitType
, scale); // compute implicit normals using gradient method
// Material ID is already set for this intersection
return;
}
rayT += distance;
i++;
}
// If we don't march into anything, intersect with the rest of the scene
ComputeIntersectionsHelper(pathSegment, geoms, geoms_size, currentIsect, hit_geom_index); // ignore this bounding volume
}
}
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
ComputeIntersectionsHelper(pathSegments[path_index], geoms, geoms_size, intersections[path_index], -1);
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
__device__ glm::vec3 ComputeDirectLighting(
int index
, int iter
, int nLights
, int numGeoms
, PathSegment & pathSegment
, ShadeableIntersection & shadeableIsect
, Material * materials
, Geom * geometry
, Geom & chosenLight
, float & pdf_BSDF
, float & pdf_Light
)
{
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, pathSegment.remainingBounces + index * index);
thrust::uniform_real_distribution<float> u01(0, 1);
Material & mat = materials[shadeableIsect.materialId];
// Light Importance Sampling - sample a position on a randomly chosen light in the scene
int randIndex = min(floor(u01(rng) * (float)nLights), (float)(nLights - 1));
Geom & lightToSample = geometry[randIndex]; // this is ok b/c we ensure in the scene file that lights are listed first in the list of geometry
glm::vec3 sampleOnLight = glm::vec3(u01(rng) - 0.5f, u01(rng) - 0.5f, 0); // we assume plane-shaped lights here
sampleOnLight = multiplyMV(lightToSample.transform, glm::vec4(sampleOnLight, 1.f)); // put the sample into world space
// Output the chosen light because for MIS, it is needed for BSDF Importance Sampling
chosenLight = lightToSample;
/* Compute the various components needed for the LTE */
glm::vec3 f, isectPos, normal, lightNormal;
float pdf, absDot;
// Compute intersection parameters and compute absDot
isectPos = pathSegment.ray.origin + shadeableIsect.t * pathSegment.ray.direction;
normal = glm::normalize(shadeableIsect.surfaceNormal);
glm::vec3 lightSampleDirection = glm::normalize(sampleOnLight - isectPos);
absDot = abs(glm::dot(lightSampleDirection, normal));
// Compute the pdf - in steradians (solid angle)!
Ray newRay;
newRay.origin = isectPos + normal * 10.f * EPSILON; // the scale determined by trial and error
newRay.direction = lightSampleDirection;
lightNormal = glm::normalize(multiplyMV(lightToSample.invTranspose, glm::vec4(0.f, 0.f, 1.f, 0.f)));
pdf = ComputeLightPDF(lightToSample, lightNormal, newRay);
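// ComputeLightPDF (defined elsewhere in the project) is assumed to return the pdf of this light
// sample converted to solid-angle measure, typically dist^2 / (|cos(theta_light)| * lightArea).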
// Store light color
Material & lightMat = materials[chosenLight.materialid];
glm::vec3 lightColor = lightMat.color * lightMat.emittance;
// Compute visibility term
PathSegment psCopy = pathSegment;
psCopy.ray = newRay;
ShadeableIntersection siCopy = shadeableIsect;
ComputeIntersectionsHelper(psCopy, geometry, numGeoms, siCopy, -1);
float vis = (siCopy.materialId == lightToSample.materialid) ? 1.f : 0.f;
// Compute f
switch (mat.bxdf)
{
case EMISSIVE:
pdf_BSDF = 1.0f;
pdf_Light = 1.0f;
return pathSegment.color * mat.color * mat.emittance;
case DIFFUSE:
pdf_BSDF = glm::dot(normal, lightSampleDirection) / PI;
f = mat.color / PI;
break;
case SPECULAR_BRDF:
pdf_BSDF = 0.0f;
f = glm::vec3(0.f); // specular materials are black in direct lighting
break;
}
// This check prevents fireflies from appearing (division by a very small value creates blown-out pixels); the scale value was determined by trial and error
if (pdf <= 5.f * EPSILON)
{
pdf_Light = 0.0f;
return glm::vec3(0.f);
}
else
{
// Compute LTE
pdf_Light = pdf;
return f * lightColor * vis * absDot / pdf;
}
}
// Computes direct lighting, but performs BSDF Importance sampling rather than light importance sampling
__device__ glm::vec3 ComputeDirectLighting_BSDF(
int index
, int iter
, int nLights
, int numGeoms
, PathSegment & pathSegment
, ShadeableIntersection & shadeableIsect
, Material * materials
, Geom * geometry
, Geom & chosenLight
, float & pdf_Light
, float & pdf_BSDF
)
{
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, pathSegment.remainingBounces + index * index);
Material & mat = materials[shadeableIsect.materialId];
/* Compute the various components needed for the LTE */
glm::vec3 f, isectPos, normal;
float pdf, absDot;
glm::vec3 bsdfSampleRayDir;
normal = glm::normalize(shadeableIsect.surfaceNormal);
switch (mat.bxdf)
{
case EMISSIVE:
pdf_BSDF = 1.0f;
pdf_Light = 1.0f;
return pathSegment.color * mat.color * mat.emittance;
case DIFFUSE:
bsdfSampleRayDir = glm::normalize(calculateRandomDirectionInHemisphere(normal, rng));
f = mat.color / PI;
pdf = glm::dot(normal, bsdfSampleRayDir) / PI;
break;
case SPECULAR_BRDF:
bsdfSampleRayDir = glm::normalize(glm::reflect(pathSegment.ray.direction, normal));
f = mat.color;
pdf = 1.0f;
break;
}
isectPos = pathSegment.ray.origin + shadeableIsect.t * pathSegment.ray.direction;
absDot = abs(glm::dot(bsdfSampleRayDir, normal));
// Compute the pdf - in steradians (solid angle)!
Ray newRay;
newRay.origin = isectPos + normal * 10.f * EPSILON; // the scale determined by trial and error
newRay.direction = bsdfSampleRayDir;
pdf_Light = ComputeLightPDF(chosenLight, normal, newRay);
// Store light color
Material & lightMat = materials[chosenLight.materialid];
glm::vec3 lightColor = lightMat.color * lightMat.emittance;
if (pdf_Light == 0.0f)
{
lightColor = glm::vec3(0.f);
}
// This check prevents fireflies from appearing (division by a very small value creates blown-out pixels); the scale value was determined by trial and error
if (pdf <= 5.f * EPSILON)
{
pdf_BSDF = 0.0f;
return glm::vec3(0.f);
}
else
{
// Compute LTE
pdf_BSDF = pdf;
return f * lightColor * absDot / pdf;
}
}
__global__ void DirectLightingIntegrator(
int nPaths
, int iter
, int nLights
, int numGeoms
, PathSegment * iterationPaths
, ShadeableIntersection * shadeableIntersections
, Material * materials
, Geom * geometry
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
iterationPaths[index].remainingBounces = 0; // no more than one bounce in direct lighting!
if (shadeableIntersections[index].t > 0.0f)
{
PathSegment & pathSegment = iterationPaths[index];
Geom chosenLight; // Needed for MIS, but not in this integrator. Lets us reuse the direct lighting code for both integrators
float pdfa, pdfb; // Needed for MIS, but not in this integrator.
pathSegment.color = (float)nLights * ComputeDirectLighting(index, iter, nLights, numGeoms, pathSegment, shadeableIntersections[index], materials, geometry, chosenLight, pdfa, pdfb);
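// Multiplying by nLights compensates for uniformly choosing a single light inside
// ComputeDirectLighting (probability 1/nLights per light).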
}
else
{
iterationPaths[index].color = glm::vec3(0.f);
}
}
}
__global__ void MISIntegrator(
int nPaths
, int iter
, int nLights
, int numGeoms
, PathSegment * iterationPaths
, ShadeableIntersection * shadeableIntersections
, Material * materials
, Geom * geometry
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment & pathSegment = iterationPaths[index];
if (shadeableIntersections[index].t > 0.0f)
{
if (pathSegment.remainingBounces == 0)
{
return;
}
//pathSegment.remainingBounces = 0; // only needed for direct lighting -- left commented out here in the MIS integrator
ShadeableIntersection & shadeableIsect = shadeableIntersections[index];
// Compute global illumination - this is nearly identical to Naive Integrator
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, iterationPaths[index].remainingBounces);
Material & mat = materials[shadeableIsect.materialId];
glm::vec3 f, globalIllumColor, isectPos, normal;
float pdf, absDot;
Ray sampledRay;
isectPos = pathSegment.ray.origin + shadeableIsect.t * pathSegment.ray.direction;
normal = glm::normalize(shadeableIsect.surfaceNormal);
switch (mat.bxdf)
{
case EMISSIVE:
pathSegment.color = pathSegment.throughput * mat.color * mat.emittance;
pathSegment.remainingBounces = 0;
return;
case DIFFUSE:
sampledRay.direction = glm::normalize(calculateRandomDirectionInHemisphere(normal, rng));
sampledRay.origin = (glm::dot(normal, sampledRay.direction) > 0) ? isectPos + normal * EPSILON : isectPos - normal * EPSILON;
pdf = glm::dot(sampledRay.direction, normal) / PI;
absDot = abs(glm::dot(sampledRay.direction, normal));
f = mat.color / PI;
pathSegment.remainingBounces--;
pathSegment.hitSpecularObject = false;
break;
case SPECULAR_BRDF:
sampledRay.direction = glm::normalize(glm::reflect(pathSegment.ray.direction, normal));
sampledRay.origin = (glm::dot(normal, sampledRay.direction) > 0) ? isectPos + normal * EPSILON : isectPos - normal * EPSILON;
f = glm::vec3(1.f);
pdf = 1.f;
absDot = 1.f; // specular objects don't attenuate color by absDot
pathSegment.remainingBounces--;
pathSegment.hitSpecularObject = true;
break;
}
// Perform no computations for this iteration
if (pathSegment.hitSpecularObject)
{
pathSegment.ray = sampledRay;
return;
}
globalIllumColor = (pdf <= 10.f * EPSILON) ? glm::vec3(0.f) : f * absDot / pdf;
// Compute direct lighting
Geom chosenLight;
float pdf_BSDF_Light; // BSDF-evaluated pdf using the light-importance sampled direction
float pdf_Light_BSDF; // Light-evaluated pdf using the BSDF-importance sampled direction
float pdf_Light; // Light evaluated, light-importance sampled
float pdf_BSDF; // BSDF evaluated, BSDF-importance sampled
glm::vec3 directLighting_Light = ComputeDirectLighting(index, iter, nLights, numGeoms, pathSegment,
shadeableIsect, materials, geometry, chosenLight, pdf_BSDF_Light, pdf_Light);
glm::vec3 directLighting_BSDF = ComputeDirectLighting_BSDF(index, iter, nLights, numGeoms, pathSegment,
shadeableIsect, materials, geometry, chosenLight, pdf_Light_BSDF, pdf_BSDF);
// Compute the proper weights using the various PDFs and power heuristic
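// With one sample per strategy, PowerHeuristic(1, pf, 1, pg) is assumed to follow the usual
// convention (Veach's power heuristic with beta = 2): w = pf^2 / (pf^2 + pg^2).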
float w_Light = PowerHeuristic(1, pdf_Light, 1, pdf_BSDF_Light);
float w_BSDF = PowerHeuristic(1, pdf_BSDF, 1, pdf_Light_BSDF);
glm::vec3 directLightingColor = (w_Light * directLighting_Light + w_BSDF * directLighting_BSDF) * (float)nLights;
// Set pathsegment properties
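// Direct lighting found at this vertex is scaled by the throughput accumulated so far, while
// throughput itself is multiplied by the BSDF term (f * |cos| / pdf) so indirect light is
// carried to the next bounce.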
pathSegment.accumColor += directLightingColor * pathSegment.throughput;
pathSegment.color = pathSegment.accumColor; // so we don't have to have another final gather kernel designed to use accumColor instead of color
pathSegment.throughput *= globalIllumColor;
pathSegment.ray = sampledRay;
}
else
{
pathSegment.color = pathSegment.accumColor;
pathSegment.remainingBounces = 0;
}
}
}
__global__ void NaiveIntegrator(
int nPaths
, int iter
, PathSegment * iterationPaths
, ShadeableIntersection * shadeableIntersections
, Material * materials
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment & pathSegment = iterationPaths[index];
ShadeableIntersection & shadeableIsect = shadeableIntersections[index];
if (shadeableIsect.t > 0.0f)
{
if (pathSegment.remainingBounces == 0)
{
pathSegment.color = glm::vec3(0.f);
return;
}
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, iterationPaths[index].remainingBounces);
Material & mat = materials[shadeableIsect.materialId];
/* Compute each part of the LTE */
glm::vec3 f, finalColor, isectPos, normal;
float pdf, absDot;
isectPos = pathSegment.ray.origin + shadeableIsect.t * pathSegment.ray.direction;
normal = glm::normalize(shadeableIsect.surfaceNormal);
// Evaluate the BxDF and compute various parts of the LTE
switch (mat.bxdf)
{
case EMISSIVE:
pathSegment.color *= mat.color * mat.emittance;
pathSegment.remainingBounces = 0;
return;
case DIFFUSE:
pathSegment.ray.direction = glm::normalize(calculateRandomDirectionInHemisphere(normal, rng));
pathSegment.ray.origin = (glm::dot(normal, pathSegment.ray.direction) > 0) ? isectPos + normal * EPSILON : isectPos - normal * EPSILON;
pdf = glm::dot(pathSegment.ray.direction, normal) / PI;
absDot = abs(glm::dot(pathSegment.ray.direction, normal));
f = mat.color / PI;
pathSegment.remainingBounces--;
break;
case SPECULAR_BRDF:
pathSegment.ray.direction = glm::normalize(glm::reflect(pathSegment.ray.direction, normal));
pathSegment.ray.origin = (glm::dot(normal, pathSegment.ray.direction) > 0) ? isectPos + normal * EPSILON : isectPos - normal * EPSILON;
f = glm::vec3(1.f);
pdf = 1.f;
absDot = 1.f; // specular objects don't attenuate color by absDot
pathSegment.remainingBounces--;
break;
}
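// Standard Monte Carlo estimate of the rendering equation: the path color is attenuated by
// f(wi, wo) * |cos(theta)| / pdf(wi) at each bounce, and multiplied by the emitted radiance
// when an emissive surface is hit (see the EMISSIVE case above).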
finalColor = f * pathSegment.color * absDot / pdf;
pathSegment.color = finalColor;
return;
}
else
{
// For all paths that have no intersection
pathSegment.color = glm::vec3(0.f);
pathSegment.remainingBounces = 0;
}
}
}
// Needed for Thrust stream compaction
struct is_active
{
__host__ __device__
bool operator()(const PathSegment &p)
{
return p.remainingBounces > 0;
}
};
struct compareIntersection
{
__host__ __device__
bool operator()(const ShadeableIntersection &s, const ShadeableIntersection &p)
{
return s.materialId < p.materialId;
}
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
int num_active_paths = num_paths;
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
dim3 numblocksPathSegmentTracing = (num_active_paths + blockSize1d - 1) / blockSize1d;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
numblocksPathSegmentTracing = (num_active_paths + blockSize1d - 1) / blockSize1d;
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
computeIntersections << < numblocksPathSegmentTracing, blockSize1d >> > (
num_active_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
//#define SORT_BY_MATERIAL_TYPE
#ifdef SORT_BY_MATERIAL_TYPE
{
// sort by material type - this slows stuff down unless there are a lot of materials to make the sorting worth it
thrust::device_ptr<ShadeableIntersection> dev_Thrust_isects(dev_intersections);
thrust::device_ptr<PathSegment> dev_Thrust_paths(dev_paths);
thrust::sort_by_key(dev_Thrust_isects, dev_Thrust_isects + num_active_paths, dev_Thrust_paths, compareIntersection());
}
#endif
#define NAIVE
//#define DIRECT
//#define MIS
#ifdef NAIVE
{
// Compute naive integration
NaiveIntegrator << < numblocksPathSegmentTracing, blockSize1d >> > (
num_active_paths
, iter
, dev_paths
, dev_intersections
, dev_materials
);
checkCUDAError("Naive Integrator");
}
#else
{
#ifdef DIRECT
{
// Compute direct lighting integration
DirectLightingIntegrator << < numblocksPathSegmentTracing, blockSize1d >> > (
num_active_paths
, iter
, hst_scene->numLights
, hst_scene->geoms.size()
, dev_paths
, dev_intersections
, dev_materials
, dev_geoms
);
checkCUDAError("Direct Lighting Integrator");
}
#else
{
#ifdef MIS
{
// Compute MIS integration
MISIntegrator << < numblocksPathSegmentTracing, blockSize1d >> > (
num_active_paths
, iter
, hst_scene->numLights
, hst_scene->geoms.size()
, dev_paths
, dev_intersections
, dev_materials
, dev_geoms
);
checkCUDAError("MIS Integrator");
}
#endif
}
#endif
}
#endif
hipDeviceSynchronize();
#define STREAM_COMPACT // note if you remove this, stuff will break.
#ifdef STREAM_COMPACT
{
// Thrust stream compaction
PathSegment * compactedPaths = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, is_active());
num_active_paths = compactedPaths - dev_paths;
}
#endif
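// thrust::partition moves path segments with remainingBounces > 0 to the front of dev_paths and
// returns the partition point, so only still-active paths are traced on the next bounce while
// terminated paths keep their final color for finalGather.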
// Update iteration condition
iterationComplete = num_active_paths == 0 || depth > traceDepth;
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> > (num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| 751891f22712ba8ee13ad7ba21db4167dfb68fda.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/device_ptr.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam
, int iter
, int traceDepth
, PathSegment * pathSegments
)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
segment.accumColor = glm::vec3(0.f);
segment.throughput = segment.color;
segment.hitSpecularObject = false;
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, index);
thrust::uniform_real_distribution<float> u_onehalf(-0.5f, 0.5f);
float xJitter = u_onehalf(rng) * cam.pixelLength.x;
float yJitter = u_onehalf(rng) * cam.pixelLength.y;
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
+ glm::vec3(xJitter, yJitter, 0.f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
__device__ float SDF(
glm::vec3 pos
, enum ImplicitSurfaceType type
, float scale
)
{
switch (type)
{
case NONE:
return 0.f; // degenerate case
case SPHERE_IMPLICIT:
return min(length(pos) - scale, length(pos - glm::vec3(0.f, 0.5f, 0.f)) - scale * 0.5f);
case MANDELBULB:
// This SDF for the Mandelbulb belongs to Inigo Quilez: https://www.shadertoy.com/view/ltfSWn
const float power = 8.f;
pos /= scale;
glm::vec3 w = pos;
float m = dot(w, w);
glm::vec4 trap = glm::vec4(abs(w), m);
float dz = 1.0;
for (int i = 0; i < 4; i++)
{
#if 1
float m2 = m*m;
float m4 = m2*m2;
dz = power*sqrt(m4*m2*m)*dz + 1.0;
float x = w.x; float x2 = x*x; float x4 = x2*x2;
float y = w.y; float y2 = y*y; float y4 = y2*y2;
float z = w.z; float z2 = z*z; float z4 = z2*z2;
float k3 = x2 + z2;
float k2 = glm::inversesqrt(k3*k3*k3*k3*k3*k3*k3);
float k1 = x4 + y4 + z4 - 6.0*y2*z2 - 6.0*x2*y2 + 2.0*z2*x2;
float k4 = x2 - y2 + z2;
w.x = pos.x + 64.0*x*y*z*(x2 - z2)*k4*(x4 - 6.0*x2*z2 + z4)*k1*k2;
w.y = pos.y + -16.0*y2*k3*k4*k4 + k1*k1;
w.z = pos.z + -8.0*y*k4*(x4*x4 - 28.0*x4*x2*z2 + 70.0*x4*z4 - 28.0*x2*z2*z4 + z4*z4)*k1*k2;
#else
dz = 8.0*pow(m, 3.5)*dz + 1.0;
float r = glm::length(w);
float b = 8.0*acos(glm::clamp(w.y / r, -1.f, 1.f));
float a = 8.0*atan2(w.x, w.z);
w = pos + powf(r, 8.0f) * glm::vec3(sin(b)*sin(a), cos(b), sin(b)*cos(a));
#endif
trap = min(trap, glm::vec4(abs(w), m));
m = dot(w, w);
if (m > 4.0)
break;
}
trap.x = m;
return 0.25*log(m)*sqrt(m) / dz;
}
return 0.f;
}
__device__ glm::vec3 ComputeNormal(
glm::vec3 pos
, float currDist
, enum ImplicitSurfaceType type
, float scale
)
{
return glm::normalize(glm::vec3(SDF(pos + glm::vec3(1.f * EPSILON, 0.f, 0.f), type, scale) - SDF(pos - glm::vec3(1.f * EPSILON, 0.f, 0.f), type, scale),
SDF(pos + glm::vec3(0.f, 1.f * EPSILON, 0.f), type, scale) - SDF(pos - glm::vec3(0.f, 1.f * EPSILON, 0.f), type, scale),
SDF(pos + glm::vec3(0.f, 0.f, 1.f * EPSILON), type, scale) - SDF(pos - glm::vec3(0.f, 0.f, 1.f * EPSILON), type, scale)));
}
__device__ void ComputeIntersectionsHelper(
PathSegment & pathSegment
, Geom* geoms
, int geoms_size
, ShadeableIntersection & currentIsect
, int ignoreIndex
)
{
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
if (i != ignoreIndex)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == PLANE)
{
t = planeIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == IMPLICITBOUNDINGVOLUME)
{
// Compute intersection w/ the bounding sphere, mark this path segment as currently being in contact with an
// implicit surface. run a raymarching kernel for all path segments that says
// if this path segment is the right index && is hitting an implicit surface, while t < tmax, march
// - also need to compute the max t value for the marching limit
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
}
if (hit_geom_index == -1)
{
currentIsect.t = -1.0f;
}
else
{
Geom & hitGeom = geoms[hit_geom_index];
//The ray hits something
currentIsect.t = t_min;
currentIsect.materialId = geoms[hit_geom_index].materialid;
currentIsect.surfaceNormal = normal;
if (hitGeom.type == IMPLICITBOUNDINGVOLUME)
{
if (outside) // do the raymarch below
{
bool newOutside;
Ray newRay = pathSegment.ray;
newRay.origin = tmp_intersect + 0.01f * newRay.direction;
glm::vec3 isectPos, isectNormal;
float tMax = sphereIntersectionTest(hitGeom, newRay, isectPos, isectNormal, newOutside);
tMax += t_min;
// Now raymarch
const int numSteps = 2000;
int i = 0;
float rayT = t_min;
const float scale = 5.f;
float distance;
glm::vec3 currentPos;
while (rayT <= tMax && i < numSteps)
{
currentPos = pathSegment.ray.origin + rayT * pathSegment.ray.direction;
distance = SDF(currentPos - hitGeom.translation, hitGeom.implicitType, scale);
if (distance < 0.001f) // very close to the surface
{
currentIsect.t = rayT;
currentIsect.surfaceNormal = ComputeNormal(currentPos - hitGeom.translation
, distance
, hitGeom.implicitType
, scale); // compute implicit normals using gradient method
// Material ID is already set for this intersection
return;
}
rayT += distance;
i++;
}
// If we don't march into anything, intersect with the rest of the scene
ComputeIntersectionsHelper(pathSegment, geoms, geoms_size, currentIsect, hit_geom_index); // ignore this bounding volume
}
else // Already inside, so compute the tMax, march to it, if we don't hit the implicit surface, then compute intersection with the rest of the scene
{
bool newOutside;
Ray newRay = pathSegment.ray;
newRay.origin = tmp_intersect;
glm::vec3 isectPos, isectNormal;
float tMax = currentIsect.t;
// Now raymarch
const int numSteps = 2000;
int i = 0;
float rayT = 0.0001f;
const float scale = 5.f;
float distance;
glm::vec3 currentPos;
while (rayT <= tMax && i < numSteps)
{
currentPos = pathSegment.ray.origin + rayT * pathSegment.ray.direction;
distance = SDF(currentPos - hitGeom.translation, hitGeom.implicitType, scale);
if (distance < 0.001f) // very close to the surface
{
currentIsect.t = rayT;
currentIsect.surfaceNormal = ComputeNormal(currentPos - hitGeom.translation
, distance
, hitGeom.implicitType
, scale); // compute implicit normals using gradient method
// Material ID is already set for this intersection
return;
}
rayT += distance;
i++;
}
// If we don't march into anything, intersect with the rest of the scene
ComputeIntersectionsHelper(pathSegment, geoms, geoms_size, currentIsect, hit_geom_index); // ignore this bounding volume
}
}
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
ComputeIntersectionsHelper(pathSegments[path_index], geoms, geoms_size, intersections[path_index], -1);
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
__device__ glm::vec3 ComputeDirectLighting(
int index
, int iter
, int nLights
, int numGeoms
, PathSegment & pathSegment
, ShadeableIntersection & shadeableIsect
, Material * materials
, Geom * geometry
, Geom & chosenLight
, float & pdf_BSDF
, float & pdf_Light
)
{
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, pathSegment.remainingBounces + index * index);
thrust::uniform_real_distribution<float> u01(0, 1);
Material & mat = materials[shadeableIsect.materialId];
// Light Importance Sampling - sample a position on a randomly chosen light in the scene
int randIndex = min(floor(u01(rng) * (float)nLights), (float)(nLights - 1));
Geom & lightToSample = geometry[randIndex]; // this is ok b/c we ensure in the scene file that lights are listed first in the list of geometry
glm::vec3 sampleOnLight = glm::vec3(u01(rng) - 0.5f, u01(rng) - 0.5f, 0); // we assume plane-shaped lights here
sampleOnLight = multiplyMV(lightToSample.transform, glm::vec4(sampleOnLight, 1.f)); // put the sample into world space
// Output the chosen light because for MIS, it is needed for BSDF Importance Sampling
chosenLight = lightToSample;
/* Compute the various components needed for the LTE */
glm::vec3 f, isectPos, normal, lightNormal;
float pdf, absDot;
// Compute intersection parameters and compute absDot
isectPos = pathSegment.ray.origin + shadeableIsect.t * pathSegment.ray.direction;
normal = glm::normalize(shadeableIsect.surfaceNormal);
glm::vec3 lightSampleDirection = glm::normalize(sampleOnLight - isectPos);
absDot = abs(glm::dot(lightSampleDirection, normal));
// Compute the pdf - in steradians (solid angle)!
Ray newRay;
newRay.origin = isectPos + normal * 10.f * EPSILON; // the scale determined by trial and error
newRay.direction = lightSampleDirection;
lightNormal = glm::normalize(multiplyMV(lightToSample.invTranspose, glm::vec4(0.f, 0.f, 1.f, 0.f)));
pdf = ComputeLightPDF(lightToSample, lightNormal, newRay);
// Store light color
Material & lightMat = materials[chosenLight.materialid];
glm::vec3 lightColor = lightMat.color * lightMat.emittance;
// Compute visibility term
PathSegment psCopy = pathSegment;
psCopy.ray = newRay;
ShadeableIntersection siCopy = shadeableIsect;
ComputeIntersectionsHelper(psCopy, geometry, numGeoms, siCopy, -1);
float vis = (siCopy.materialId == lightToSample.materialid) ? 1.f : 0.f;
// Compute f
switch (mat.bxdf)
{
case EMISSIVE:
pdf_BSDF = 1.0f;
pdf_Light = 1.0f;
return pathSegment.color * mat.color * mat.emittance;
case DIFFUSE:
pdf_BSDF = glm::dot(normal, lightSampleDirection) / PI;
f = mat.color / PI;
break;
case SPECULAR_BRDF:
pdf_BSDF = 0.0f;
f = glm::vec3(0.f); // specular materials are black in direct lighting
break;
}
// This check prevents fireflies from appearing (division by a very small value creates blown-out pixels); the scale value was determined by trial and error
if (pdf <= 5.f * EPSILON)
{
pdf_Light = 0.0f;
return glm::vec3(0.f);
}
else
{
// Compute LTE
pdf_Light = pdf;
return f * lightColor * vis * absDot / pdf;
}
}
// Computes direct lighting, but performs BSDF Importance sampling rather than light importance sampling
__device__ glm::vec3 ComputeDirectLighting_BSDF(
int index
, int iter
, int nLights
, int numGeoms
, PathSegment & pathSegment
, ShadeableIntersection & shadeableIsect
, Material * materials
, Geom * geometry
, Geom & chosenLight
, float & pdf_Light
, float & pdf_BSDF
)
{
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, pathSegment.remainingBounces + index * index);
Material & mat = materials[shadeableIsect.materialId];
/* Compute the various components needed for the LTE */
glm::vec3 f, isectPos, normal;
float pdf, absDot;
glm::vec3 bsdfSampleRayDir;
normal = glm::normalize(shadeableIsect.surfaceNormal);
switch (mat.bxdf)
{
case EMISSIVE:
pdf_BSDF = 1.0f;
pdf_Light = 1.0f;
return pathSegment.color * mat.color * mat.emittance;
case DIFFUSE:
bsdfSampleRayDir = glm::normalize(calculateRandomDirectionInHemisphere(normal, rng));
f = mat.color / PI;
pdf = glm::dot(normal, bsdfSampleRayDir) / PI;
break;
case SPECULAR_BRDF:
bsdfSampleRayDir = glm::normalize(glm::reflect(pathSegment.ray.direction, normal));
f = mat.color;
pdf = 1.0f;
break;
}
isectPos = pathSegment.ray.origin + shadeableIsect.t * pathSegment.ray.direction;
absDot = abs(glm::dot(bsdfSampleRayDir, normal));
// Compute the pdf - in steradians (solid angle)!
Ray newRay;
newRay.origin = isectPos + normal * 10.f * EPSILON; // the scale determined by trial and error
newRay.direction = bsdfSampleRayDir;
pdf_Light = ComputeLightPDF(chosenLight, normal, newRay);
// Store light color
Material & lightMat = materials[chosenLight.materialid];
glm::vec3 lightColor = lightMat.color * lightMat.emittance;
if (pdf_Light == 0.0f)
{
lightColor = glm::vec3(0.f);
}
// This check prevents fireflies from appearing (division by a very small value creates blown-out pixels); the scale value was determined by trial and error
if (pdf <= 5.f * EPSILON)
{
pdf_BSDF = 0.0f;
return glm::vec3(0.f);
}
else
{
// Compute LTE
pdf_BSDF = pdf;
return f * lightColor * absDot / pdf;
}
}
__global__ void DirectLightingIntegrator(
int nPaths
, int iter
, int nLights
, int numGeoms
, PathSegment * iterationPaths
, ShadeableIntersection * shadeableIntersections
, Material * materials
, Geom * geometry
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
iterationPaths[index].remainingBounces = 0; // no more than one bounce in direct lighting!
if (shadeableIntersections[index].t > 0.0f)
{
PathSegment & pathSegment = iterationPaths[index];
Geom chosenLight; // Needed for MIS, but not in this integrator. Lets us reuse the direct lighting code for both integrators
float pdfa, pdfb; // Needed for MIS, but not in this integrator.
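// Multiplying by nLights below divides out the uniform 1/nLights pdf of picking a single light.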
pathSegment.color = (float)nLights * ComputeDirectLighting(index, iter, nLights, numGeoms, pathSegment, shadeableIntersections[index], materials, geometry, chosenLight, pdfa, pdfb);
}
else
{
iterationPaths[index].color = glm::vec3(0.f);
}
}
}
__global__ void MISIntegrator(
int nPaths
, int iter
, int nLights
, int numGeoms
, PathSegment * iterationPaths
, ShadeableIntersection * shadeableIntersections
, Material * materials
, Geom * geometry
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment & pathSegment = iterationPaths[index];
if (shadeableIntersections[index].t > 0.0f)
{
if (pathSegment.remainingBounces == 0)
{
return;
}
//pathSegment.remainingBounces = 0; // direct-lighting-only behavior; the MIS integrator keeps bouncing
ShadeableIntersection & shadeableIsect = shadeableIntersections[index];
// Compute global illumination - this is nearly identical to Naive Integrator
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, iterationPaths[index].remainingBounces);
Material & mat = materials[shadeableIsect.materialId];
glm::vec3 f, globalIllumColor, isectPos, normal;
float pdf, absDot;
Ray sampledRay;
isectPos = pathSegment.ray.origin + shadeableIsect.t * pathSegment.ray.direction;
normal = glm::normalize(shadeableIsect.surfaceNormal);
switch (mat.bxdf)
{
case EMISSIVE:
pathSegment.color = pathSegment.throughput * mat.color * mat.emittance;
pathSegment.remainingBounces = 0;
return;
case DIFFUSE:
sampledRay.direction = glm::normalize(calculateRandomDirectionInHemisphere(normal, rng));
sampledRay.origin = (glm::dot(normal, sampledRay.direction) > 0) ? isectPos + normal * EPSILON : isectPos - normal * EPSILON;
pdf = glm::dot(sampledRay.direction, normal) / PI;
absDot = abs(glm::dot(sampledRay.direction, normal));
f = mat.color / PI;
pathSegment.remainingBounces--;
pathSegment.hitSpecularObject = false;
break;
case SPECULAR_BRDF:
sampledRay.direction = glm::normalize(glm::reflect(pathSegment.ray.direction, normal));
sampledRay.origin = (glm::dot(normal, sampledRay.direction) > 0) ? isectPos + normal * EPSILON : isectPos - normal * EPSILON;
f = glm::vec3(1.f);
pdf = 1.f;
absDot = 1.f; // specular objects don't attenuate color by absDot
pathSegment.remainingBounces--;
pathSegment.hitSpecularObject = true;
break;
}
// Perform no computations for this iteration
if (pathSegment.hitSpecularObject)
{
pathSegment.ray = sampledRay;
return;
}
globalIllumColor = (pdf <= 10.f * EPSILON) ? glm::vec3(0.f) : f * absDot / pdf;
// Compute direct lighting
Geom chosenLight;
float pdf_BSDF_Light; // BSDF-evaluated pdf using the light-importance sampled direction
float pdf_Light_BSDF; // Light-evaluated pdf using the BSDF-importance sampled direction
float pdf_Light; // Light evaluated, light-importance sampled
float pdf_BSDF; // BSDF evaluated, BSDF-importance sampled
glm::vec3 directLighting_Light = ComputeDirectLighting(index, iter, nLights, numGeoms, pathSegment,
shadeableIsect, materials, geometry, chosenLight, pdf_BSDF_Light, pdf_Light);
glm::vec3 directLighting_BSDF = ComputeDirectLighting_BSDF(index, iter, nLights, numGeoms, pathSegment,
shadeableIsect, materials, geometry, chosenLight, pdf_Light_BSDF, pdf_BSDF);
// Compute the proper weights using the various PDFs and power heuristic
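// Assuming PowerHeuristic(nf, fPdf, ng, gPdf) implements Veach's beta = 2 power heuristic,
// (nf*fPdf)^2 / ((nf*fPdf)^2 + (ng*gPdf)^2), each weight favors whichever sampling strategy
// produced its direction with the higher pdf, keeping the combined estimate low-variance.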
float w_Light = PowerHeuristic(1, pdf_Light, 1, pdf_BSDF_Light);
float w_BSDF = PowerHeuristic(1, pdf_BSDF, 1, pdf_Light_BSDF);
glm::vec3 directLightingColor = (w_Light * directLighting_Light + w_BSDF * directLighting_BSDF) * (float)nLights;
// Set pathsegment properties
pathSegment.accumColor += directLightingColor * pathSegment.throughput;
pathSegment.color = pathSegment.accumColor; // so we don't have to have another final gather kernel designed to use accumColor instead of color
pathSegment.throughput *= globalIllumColor;
pathSegment.ray = sampledRay;
}
else
{
pathSegment.color = pathSegment.accumColor;
pathSegment.remainingBounces = 0;
}
}
}
__global__ void NaiveIntegrator(
int nPaths
, int iter
, PathSegment * iterationPaths
, ShadeableIntersection * shadeableIntersections
, Material * materials
)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment & pathSegment = iterationPaths[index];
ShadeableIntersection & shadeableIsect = shadeableIntersections[index];
if (shadeableIsect.t > 0.0f)
{
if (pathSegment.remainingBounces == 0)
{
pathSegment.color = glm::vec3(0.f);
return;
}
thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, iterationPaths[index].remainingBounces);
Material & mat = materials[shadeableIsect.materialId];
/* Compute each part of the LTE */
glm::vec3 f, finalColor, isectPos, normal;
float pdf, absDot;
isectPos = pathSegment.ray.origin + shadeableIsect.t * pathSegment.ray.direction;
normal = glm::normalize(shadeableIsect.surfaceNormal);
// Evaluate the BxDF and compute various parts of the LTE
switch (mat.bxdf)
{
case EMISSIVE:
pathSegment.color *= mat.color * mat.emittance;
pathSegment.remainingBounces = 0;
return;
case DIFFUSE:
pathSegment.ray.direction = glm::normalize(calculateRandomDirectionInHemisphere(normal, rng));
pathSegment.ray.origin = (glm::dot(normal, pathSegment.ray.direction) > 0) ? isectPos + normal * EPSILON : isectPos - normal * EPSILON;
pdf = glm::dot(pathSegment.ray.direction, normal) / PI;
absDot = abs(glm::dot(pathSegment.ray.direction, normal));
f = mat.color / PI;
pathSegment.remainingBounces--;
break;
case SPECULAR_BRDF:
pathSegment.ray.direction = glm::normalize(glm::reflect(pathSegment.ray.direction, normal));
pathSegment.ray.origin = (glm::dot(normal, pathSegment.ray.direction) > 0) ? isectPos + normal * EPSILON : isectPos - normal * EPSILON;
f = glm::vec3(1.f);
pdf = 1.f;
absDot = 1.f; // specular objects don't attenuate color by absDot
pathSegment.remainingBounces--;
break;
}
finalColor = f * pathSegment.color * absDot / pdf;
pathSegment.color = finalColor;
return;
}
else
{
// For all paths that have no intersection
pathSegment.color = glm::vec3(0.f);
pathSegment.remainingBounces = 0;
}
}
}
// Needed for Thrust stream compaction
struct is_active
{
__host__ __device__
bool operator()(const PathSegment &p)
{
return p.remainingBounces > 0;
}
};
struct compareIntersection
{
__host__ __device__
bool operator()(const ShadeableIntersection &s, const ShadeableIntersection &p)
{
return s.materialId < p.materialId;
}
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
int num_active_paths = num_paths;
generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
dim3 numblocksPathSegmentTracing = (num_active_paths + blockSize1d - 1) / blockSize1d;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
numblocksPathSegmentTracing = (num_active_paths + blockSize1d - 1) / blockSize1d;
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
computeIntersections << < numblocksPathSegmentTracing, blockSize1d >> > (
num_active_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
//#define SORT_BY_MATERIAL_TYPE
#ifdef SORT_BY_MATERIAL_TYPE
{
// sort by material type - this slows stuff down unless there are a lot of materials to make the sorting worth it
thrust::device_ptr<ShadeableIntersection> dev_Thrust_isects(dev_intersections);
thrust::device_ptr<PathSegment> dev_Thrust_paths(dev_paths);
thrust::sort_by_key(dev_Thrust_isects, dev_Thrust_isects + num_active_paths, dev_Thrust_paths, compareIntersection());
}
#endif
#define NAIVE
//#define DIRECT
//#define MIS
#ifdef NAIVE
{
// Compute naive integration
NaiveIntegrator << < numblocksPathSegmentTracing, blockSize1d >> > (
num_active_paths
, iter
, dev_paths
, dev_intersections
, dev_materials
);
checkCUDAError("Naive Integrator");
}
#else
{
#ifdef DIRECT
{
// Compute direct lighting integration
DirectLightingIntegrator << < numblocksPathSegmentTracing, blockSize1d >> > (
num_active_paths
, iter
, hst_scene->numLights
, hst_scene->geoms.size()
, dev_paths
, dev_intersections
, dev_materials
, dev_geoms
);
checkCUDAError("Direct Lighting Integrator");
}
#else
{
#ifdef MIS
{
// Compute MIS integration
MISIntegrator << < numblocksPathSegmentTracing, blockSize1d >> > (
num_active_paths
, iter
, hst_scene->numLights
, hst_scene->geoms.size()
, dev_paths
, dev_intersections
, dev_materials
, dev_geoms
);
checkCUDAError("MIS Integrator");
}
#endif
}
#endif
}
#endif
cudaDeviceSynchronize();
#define STREAM_COMPACT // note if you remove this, stuff will break.
#ifdef STREAM_COMPACT
{
// Thrust stream compaction
PathSegment * compactedPaths = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, is_active());
num_active_paths = compactedPaths - dev_paths;
}
#endif
// Update iteration condition
iterationComplete = num_active_paths == 0 || depth > traceDepth;
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather << <numBlocksPixels, blockSize1d >> > (num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
ccaa59a5ff679dc59a3db5d014402bb9cce24fb1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
/****************************************************************************
 * An experiment with cuda kernel invocation parameters. 26 blocks of 26
 * threads each enumerate every two-letter uppercase password prefix.
*
* Compile with:
* nvcc -o 2_1_a 2_1_a.cu
*
* Dr Kevan Buckley, University of Wolverhampton, January 2018
*****************************************************************************/
__device__ int is_a_match(char *attempt){
char password1[] ="AA1111";
char password2[] ="BB2222";
char password3[] ="CC3333";
char password4[] ="DD4444";
char *a = attempt;
char *b = attempt;
char *c = attempt;
char *d = attempt;
char *pass1 = password1;
char *pass2 = password2;
char *pass3 = password3;
char *pass4 = password4;
while(*a ==*pass1){
if(*a == '\0')
{
printf("password:%s\n", password1);
break;
}
a++;
pass1++;
}
while(*b ==*pass2){
if(*b == '\0')
{
printf("password:%s\n", password2);
break;
}
b++;
pass2++;
}
while(*c ==*pass3){
if(*c == '\0')
{
printf("password:%s\n", password3);
break;
}
c++;
pass3++;
}
while(*d ==*pass4){
if(*d == '\0')
{
printf("password: %s\n",password4);
return 1;
}
d++;
pass4++;
}
return 0;
}
__global__ void kernel(){
char i1, i2, i3, i4;
char password[7];
password[6] ='\0';
int i = blockIdx.x +65;
int j = threadIdx.x+65;
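// 65 is ASCII 'A'; with the 26-block x 26-thread launch in main, each (block, thread) pair fixes one 'AA'..'ZZ' prefix.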
char firstMatch =i;
char secondMatch =j;
password[0] =firstMatch;
password[1] =secondMatch;
for(i1='0'; i1<='9'; i1++){
for(i2='0'; i2<='9'; i2++){
for(i3='0'; i3<='9'; i3++){
for(i4='0'; i4<='9'; i4++){
password[2] =i1;
password[3] =i2;
password[4] =i3;
password[5] =i4;
if(is_a_match(password)){
}
else{
//printf("tried: %s\n",password);
}
}
}
}
}
}
int time_difference(struct timespec *start, struct timespec *finish,long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( kernel), dim3(26),dim3(26), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
| ccaa59a5ff679dc59a3db5d014402bb9cce24fb1.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
 * An experiment with cuda kernel invocation parameters. 26 blocks of 26
 * threads each enumerate every two-letter uppercase password prefix.
*
* Compile with:
* nvcc -o 2_1_a 2_1_a.cu
*
* Dr Kevan Buckley, University of Wolverhampton, January 2018
*****************************************************************************/
__device__ int is_a_match(char *attempt){
char password1[] ="AA1111";
char password2[] ="BB2222";
char password3[] ="CC3333";
char password4[] ="DD4444";
char *a = attempt;
char *b = attempt;
char *c = attempt;
char *d = attempt;
char *pass1 = password1;
char *pass2 = password2;
char *pass3 = password3;
char *pass4 = password4;
while(*a ==*pass1){
if(*a == '\0')
{
printf("password:%s\n", password1);
break;
}
a++;
pass1++;
}
while(*b ==*pass2){
if(*b == '\0')
{
printf("password:%s\n", password2);
break;
}
b++;
pass2++;
}
while(*c ==*pass3){
if(*c == '\0')
{
printf("password:%s\n", password3);
break;
}
c++;
pass3++;
}
while(*d ==*pass4){
if(*d == '\0')
{
printf("password: %s\n",password4);
return 1;
}
d++;
pass4++;
}
return 0;
}
__global__ void kernel(){
char i1, i2, i3, i4;
char password[7];
password[6] ='\0';
int i = blockIdx.x +65;
int j = threadIdx.x+65;
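// 65 is ASCII 'A'; with the 26-block x 26-thread launch in main, each (block, thread) pair fixes one 'AA'..'ZZ' prefix.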
char firstMatch =i;
char secondMatch =j;
password[0] =firstMatch;
password[1] =secondMatch;
for(i1='0'; i1<='9'; i1++){
for(i2='0'; i2<='9'; i2++){
for(i3='0'; i3<='9'; i3++){
for(i4='0'; i4<='9'; i4++){
password[2] =i1;
password[3] =i2;
password[4] =i3;
password[5] =i4;
if(is_a_match(password)){
}
else{
//printf("tried: %s\n",password);
}
}
}
}
}
}
int time_difference(struct timespec *start, struct timespec *finish,long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel<<<26,26>>>();
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
94df576efae1cefc723f25957ccc67413be41328.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*Unrolled version of kernel that computes everything working with shared and constant memory*/
/*Takes the unrolling factor (1, 2, 4 or 8) as a command-line argument*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
__constant__ float INP1[128];
bool relativelyEqual(float a, float b, float maxreldiff = FLT_EPSILON){
const float difference = fabs(a-b);
a = fabs(a);
b = fabs(b);
const float scaledepsilon = maxreldiff*max(a,b);
return difference <=scaledepsilon;
}
__global__ void kernel(float ip[], float op[]){
__shared__ float local[128];
int tx = threadIdx.x;
int t = tx +blockIdx.x*blockDim.x;
float temp_res=0;
int i;
local[tx] = ip[t];
__syncthreads();
for(i=0;i<=tx;i++){
temp_res+=local[i];
}
for(i=0;i<128;i++){
temp_res += (INP1[tx]*INP1[i]);
}
op[t] = temp_res;
}
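// kerneltwo, kernelfour and kerneleight below unroll the 128-iteration loop over INP1 by
// factors of 2, 4 and 8; each variant still performs the same 128 multiply-adds per thread.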
__global__ void kerneltwo(float ip[], float op[]){
__shared__ float local[128];
int tx = threadIdx.x;
int t = tx +blockIdx.x*blockDim.x;
float temp_res=0;
int i;
local[tx] = ip[t];
__syncthreads();
for(i=0;i<=tx;i++){
temp_res+=local[i];
}
for(i=0;i<64;i++){
temp_res += (INP1[tx]*INP1[i]);
temp_res += (INP1[tx]*INP1[i+64]);
}
op[t] = temp_res;
}
__global__ void kernelfour(float ip[], float op[]){
__shared__ float local[128];
int tx = threadIdx.x;
int t = tx +blockIdx.x*blockDim.x;
float temp_res=0;
int i;
local[tx] = ip[t];
__syncthreads();
for(i=0;i<=tx;i++){
temp_res+=local[i];
}
for(i=0;i<32;i++){
temp_res += (INP1[tx]*INP1[i]);
temp_res += (INP1[tx]*INP1[i+32]);
temp_res += (INP1[tx]*INP1[i+64]);
temp_res += (INP1[tx]*INP1[i+96]);
}
op[t] = temp_res;
}
__global__ void kerneleight(float ip[], float op[]){
__shared__ float local[128];
int tx = threadIdx.x;
int t = tx +blockIdx.x*blockDim.x;
float temp_res=0;
int i;
local[tx] = ip[t];
__syncthreads();
for(i=0;i<=tx;i++){
temp_res+=local[i];
}
for(i=0;i<16;i++){
temp_res += (INP1[tx]*INP1[i]);
temp_res += (INP1[tx]*INP1[i+16]);
temp_res += (INP1[tx]*INP1[i+32]);
temp_res += (INP1[tx]*INP1[i+48]);
temp_res += (INP1[tx]*INP1[i+64]);
temp_res += (INP1[tx]*INP1[i+80]);
temp_res += (INP1[tx]*INP1[i+96]);
temp_res += (INP1[tx]*INP1[i+112]);
}
op[t] = temp_res;
}
int main( int argc, char* argv[] )
{
int factor = atoi(argv[1]);
float time;
//Host input vectors
float *input1;
float *input2;
int i, j;
int n;
n = 128;
//Size in bytes for each vector
size_t bytes1 = n*sizeof(float);
size_t bytes2 = n*n*sizeof(float);
//Allocating memory for host vectors
input1 = (float*)malloc(bytes1);
input2 = (float*)malloc(bytes2);
//Device input vectors
float *d_ip1;
float *d_ip2;
//Allocate memory for vectors on GPU
hipMalloc(&d_ip1, bytes1);
hipMalloc(&d_ip2, bytes2);
//Initializing input vectors
for(i=0;i<n;i++){
for(j=0;j<n;j++){
input2[n*i+j] = (float)(rand()/1000000000);
}
input1[i]=(float)(rand()/1000000000);
}
/***********************---CPU code---*******************************/
float cresult[128][128];
float temp[128];
int k;
clock_t Cstart, Cstop;
double ctime;
Cstart = clock();
for(i=0;i<128;i++){
temp[i]=0;
for(j=0;j<128;j++){
temp[i]+=input2[i*128+j];
cresult[i][j]=temp[i];
for(k=0;k<128;k++){
cresult[i][j] += input1[j]*input1[k];
}
//printf("cresult[%d][%d] : %f",i,j,cresult[i][j]);
}
}
Cstop = clock();
ctime = (((double)(Cstop-Cstart))/CLOCKS_PER_SEC)*1000 ;
printf("Time taken on CPU: %fms\n", ctime);
/********************---end of CPU code---***************************/
//Copying input1 to constant memory for better use of memory hierarchy
hipMemcpyToSymbol(INP1, input1, bytes1);
//Host output vector
float *result;
//Allocating memory for host output vectors
result = (float*)malloc(bytes2);
//Device output vector
float *d_result;
//Allocating memory for device output vectors
hipMalloc(&d_result, bytes2);
// Copy host vectors to device
hipMemcpy(d_ip2, input2, bytes2, hipMemcpyHostToDevice);
// Number of threads in each thread block
dim3 dimBlock(128,1);
// Number of thread blocks in grid
dim3 dimGrid(128,1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
if (factor == 1) {
printf("Unrolled Kernel version \n");
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_ip2, d_result);
}
else if (factor == 2) {
printf("Unrolling factor of two Kernel version \n");
hipLaunchKernelGGL(( kerneltwo), dim3(dimGrid), dim3(dimBlock), 0, 0, d_ip2, d_result);
}
else if (factor == 4) {
printf("Unrolling factor of four Kernel version \n");
hipLaunchKernelGGL(( kernelfour), dim3(dimGrid), dim3(dimBlock), 0, 0, d_ip2, d_result);
}
else if (factor == 8) {
printf("Unrolling factor of eight Kernel version \n");
hipLaunchKernelGGL(( kerneleight), dim3(dimGrid), dim3(dimBlock), 0, 0, d_ip2, d_result);
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
time = 0;
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("Time taken on GPU: %lf ms\n", time);
hipMemcpy(result, d_result, bytes2, hipMemcpyDeviceToHost);
int count = 0;
for(i=0;i<128;i++){
for(j=0;j<128;j++){
if(floor(cresult[i][j])!=floor(result[i*128+j])){
//(ceil)(result[i*128+j])==(ceil)(cresult[i][j])
count+=1;
}
}
}
printf("count: %d\n",count);
if(count==0){
printf("Verified correctness of CPU and GPU results\n");
}
// Release device memory
hipFree(d_ip1);
hipFree(d_ip2);
hipFree(d_result);
// Release host memory
free(input1);
free(input2);
free(result);
return 0;
}
| 94df576efae1cefc723f25957ccc67413be41328.cu | /*Unrolled version of kernel that computes everything working with shared and constant memory*/
/*Takes the unrolling factor (1, 2, 4 or 8) as a command-line argument*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
__constant__ float INP1[128];
bool relativelyEqual(float a, float b, float maxreldiff = FLT_EPSILON){
const float difference = fabs(a-b);
a = fabs(a);
b = fabs(b);
const float scaledepsilon = maxreldiff*max(a,b);
return difference <=scaledepsilon;
}
__global__ void kernel(float ip[], float op[]){
__shared__ float local[128];
int tx = threadIdx.x;
int t = tx +blockIdx.x*blockDim.x;
float temp_res=0;
int i;
local[tx] = ip[t];
__syncthreads();
for(i=0;i<=tx;i++){
temp_res+=local[i];
}
for(i=0;i<128;i++){
temp_res += (INP1[tx]*INP1[i]);
}
op[t] = temp_res;
}
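// kerneltwo, kernelfour and kerneleight below unroll the 128-iteration loop over INP1 by
// factors of 2, 4 and 8; each variant still performs the same 128 multiply-adds per thread.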
__global__ void kerneltwo(float ip[], float op[]){
__shared__ float local[128];
int tx = threadIdx.x;
int t = tx +blockIdx.x*blockDim.x;
float temp_res=0;
int i;
local[tx] = ip[t];
__syncthreads();
for(i=0;i<=tx;i++){
temp_res+=local[i];
}
for(i=0;i<64;i++){
temp_res += (INP1[tx]*INP1[i]);
temp_res += (INP1[tx]*INP1[i+64]);
}
op[t] = temp_res;
}
__global__ void kernelfour(float ip[], float op[]){
__shared__ float local[128];
int tx = threadIdx.x;
int t = tx +blockIdx.x*blockDim.x;
float temp_res=0;
int i;
local[tx] = ip[t];
__syncthreads();
for(i=0;i<=tx;i++){
temp_res+=local[i];
}
for(i=0;i<32;i++){
temp_res += (INP1[tx]*INP1[i]);
temp_res += (INP1[tx]*INP1[i+32]);
temp_res += (INP1[tx]*INP1[i+64]);
temp_res += (INP1[tx]*INP1[i+96]);
}
op[t] = temp_res;
}
__global__ void kerneleight(float ip[], float op[]){
__shared__ float local[128];
int tx = threadIdx.x;
int t = tx +blockIdx.x*blockDim.x;
float temp_res=0;
int i;
local[tx] = ip[t];
__syncthreads();
for(i=0;i<=tx;i++){
temp_res+=local[i];
}
for(i=0;i<16;i++){
temp_res += (INP1[tx]*INP1[i]);
temp_res += (INP1[tx]*INP1[i+16]);
temp_res += (INP1[tx]*INP1[i+32]);
temp_res += (INP1[tx]*INP1[i+48]);
temp_res += (INP1[tx]*INP1[i+64]);
temp_res += (INP1[tx]*INP1[i+80]);
temp_res += (INP1[tx]*INP1[i+96]);
temp_res += (INP1[tx]*INP1[i+112]);
}
op[t] = temp_res;
}
int main( int argc, char* argv[] )
{
int factor = atoi(argv[1]);
float time;
//Host input vectors
float *input1;
float *input2;
int i, j;
int n;
n = 128;
//Size in bytes for each vector
size_t bytes1 = n*sizeof(float);
size_t bytes2 = n*n*sizeof(float);
//Allocating memory for host vectors
input1 = (float*)malloc(bytes1);
input2 = (float*)malloc(bytes2);
//Device input vectors
float *d_ip1;
float *d_ip2;
//Allocate memory for vectors on GPU
cudaMalloc(&d_ip1, bytes1);
cudaMalloc(&d_ip2, bytes2);
//Initializing input vectors
for(i=0;i<n;i++){
for(j=0;j<n;j++){
input2[n*i+j] = (float)(rand()/1000000000);
}
input1[i]=(float)(rand()/1000000000);
}
/***********************---CPU code---*******************************/
float cresult[128][128];
float temp[128];
int k;
clock_t Cstart, Cstop;
double ctime;
Cstart = clock();
for(i=0;i<128;i++){
temp[i]=0;
for(j=0;j<128;j++){
temp[i]+=input2[i*128+j];
cresult[i][j]=temp[i];
for(k=0;k<128;k++){
cresult[i][j] += input1[j]*input1[k];
}
//printf("cresult[%d][%d] : %f",i,j,cresult[i][j]);
}
}
Cstop = clock();
ctime = (((double)(Cstop-Cstart))/CLOCKS_PER_SEC)*1000 ;
printf("Time taken on CPU: %fms\n", ctime);
/********************---end of CPU code---***************************/
//Copying input1 to constant memory for better use of memory hierarchy
cudaMemcpyToSymbol(INP1, input1, bytes1);
//Host output vector
float *result;
//Allocating memory for host output vectors
result = (float*)malloc(bytes2);
//Device output vector
float *d_result;
//Allocating memory for device output vectors
cudaMalloc(&d_result, bytes2);
// Copy host vectors to device
cudaMemcpy(d_ip2, input2, bytes2, cudaMemcpyHostToDevice);
// Number of threads in each thread block
dim3 dimBlock(128,1);
// Number of thread blocks in grid
dim3 dimGrid(128,1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
if (factor == 1) {
printf("Unrolled Kernel version \n");
kernel<<<dimGrid, dimBlock>>>(d_ip2, d_result);
}
else if (factor == 2) {
printf("Unrolling factor of two Kernel version \n");
kerneltwo<<<dimGrid, dimBlock>>>(d_ip2, d_result);
}
else if (factor == 4) {
printf("Unrolling factor of four Kernel version \n");
kernelfour<<<dimGrid, dimBlock>>>(d_ip2, d_result);
}
else if (factor == 8) {
printf("Unrolling factor of eight Kernel version \n");
kerneleight<<<dimGrid, dimBlock>>>(d_ip2, d_result);
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
time = 0;
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Time taken on GPU: %lf ms\n", time);
cudaMemcpy(result, d_result, bytes2, cudaMemcpyDeviceToHost);
int count = 0;
for(i=0;i<128;i++){
for(j=0;j<128;j++){
if(floor(cresult[i][j])!=floor(result[i*128+j])){
//(ceil)(result[i*128+j])==(ceil)(cresult[i][j])
count+=1;
}
}
}
printf("count: %d\n",count);
if(count==0){
printf("Verified correctness of CPU and GPU results\n");
}
// Release device memory
cudaFree(d_ip1);
cudaFree(d_ip2);
cudaFree(d_result);
// Release host memory
free(input1);
free(input2);
free(result);
return 0;
}
|
7813f72158c1ea40092abc48947428e4f9a8d875.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_eqScalar (int n, double *result, double *x, double y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = (x[id] == y)?1.0:0.0;
}
} | 7813f72158c1ea40092abc48947428e4f9a8d875.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the doubleing point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in doubleing-point.
extern "C"
// Round to nearest integer value in doubleing-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two doubleing point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the doubleing-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision doubleing-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision doubleing-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_eqScalar (int n, double *result, double *x, double y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = (x[id] == y)?1.0:0.0;
}
} |
e5d97d191072e63e06530f53d3da960251fcaa7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define NDIM 2
template <int8_t kNdim>
class IndexIterator {
public:
__host__ __device__ void Set(int64_t i) {
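// Decomposes the flat index i into per-dimension indices (row-major: the last dimension varies fastest).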
int8_t j = kNdim;
for (; --j >= 1;) {
index_[j] = i % shape_[j];
i /= shape_[j];
}
index_[j] = i % shape_[j];
}
__host__ __device__ int64_t* index() { return index_; }
private:
//const int64_t* shape_;
int64_t shape_[kNdim];
//int64_t total_size_{};
//int64_t raw_index_{};
//int64_t step_{};
int64_t index_[kNdim];
};
__global__ void test(IndexIterator<NDIM> i) {
i.Set(1);
printf("%lld\n", (long long)i.index()[0]);
}
int main() {
IndexIterator<NDIM> i{};
hipLaunchKernelGGL(( test), dim3(1),dim3(1), 0, 0, i);
hipDeviceSynchronize(); // ensure the device printf completes before main returns
}
| e5d97d191072e63e06530f53d3da960251fcaa7b.cu | #include <stdio.h>
#define NDIM 2
template <int8_t kNdim>
class IndexIterator {
public:
__host__ __device__ void Set(int64_t i) {
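// Decomposes the flat index i into per-dimension indices (row-major: the last dimension varies fastest).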
int8_t j = kNdim;
for (; --j >= 1;) {
index_[j] = i % shape_[j];
i /= shape_[j];
}
index_[j] = i % shape_[j];
}
__host__ __device__ int64_t* index() { return index_; }
private:
//const int64_t* shape_;
int64_t shape_[kNdim];
//int64_t total_size_{};
//int64_t raw_index_{};
//int64_t step_{};
int64_t index_[kNdim];
};
__global__ void test(IndexIterator<NDIM> i) {
i.Set(1);
printf("%lld\n", (long long)i.index()[0]);
}
int main() {
IndexIterator<NDIM> i{};
test<<<1,1>>>(i);
cudaDeviceSynchronize(); // ensure the device printf completes before main returns
}
|
54668bc6a88f329552667ee49d47e9afcc3c545b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <utility>
#include <vector>
#include "dali/core/format.h"
#include "dali/operators/bbox/bb_flip.cuh"
namespace dali {
/**
* @param samples - Sample description (input/output pointer + flipping configuration)
* @param blocks - Mapping the current CUDA block to range within particular sample
*/
template <bool ltrb>
__global__ void BbFlipKernel(const BbFlipSampleDesc *samples, const kernels::BlockDesc<1> *blocks) {
const auto &block = blocks[blockIdx.x];
const auto &sample = samples[block.sample_idx];
for (int idx = threadIdx.x + block.start.x; idx < block.end.x; idx += blockDim.x) {
bool h = sample.horz;
bool v = sample.vert;
const auto *in = &sample.input[4 * idx];
auto *out = &sample.output[4 * idx];
if (ltrb) {
out[0] = h ? 1.0f - in[2] : in[0];
out[1] = v ? 1.0f - in[3] : in[1];
out[2] = h ? 1.0f - in[0] : in[2];
out[3] = v ? 1.0f - in[1] : in[3];
} else {
// No range checking required if the parenthesis is respected in the two lines below.
// If the original bounding box satisfies the condition that x + w <= 1.0f, then the
// expression 1.0f - (x + w) is guaranteed to yield a non-negative result. QED.
out[0] = h ? 1.0f - (in[0] + in[2]) : in[0];
out[1] = v ? 1.0f - (in[1] + in[3]) : in[1];
out[2] = in[2]; // width and
out[3] = in[3]; // height remain unaffected
}
}
}
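// Normalizes any supported box-tensor shape to (num_boxes, 4): 2D shapes pass through,
// higher-rank shapes collapse their outer dimensions, and flat 1D shapes are split into groups of 4.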
TensorListShape<2> GetNormalizedShape(const TensorListShape<-1> &shape) {
if (shape.sample_dim() == 2) {
return shape.to_static<2>();
}
if (shape.sample_dim() > 2) {
std::array<std::pair<int, int>, 1> collapse_group = {{{0, shape.sample_dim() - 1}}};
return collapse_dims<2>(shape, collapse_group);
}
TensorListShape<2> result(shape.num_samples(), 2);
for (int i = 0; i < shape.num_samples(); i++) {
auto tspan = shape.tensor_shape_span(i);
result.set_tensor_shape(i, {tspan[0] / 4, 4});
}
return result;
}
void BbFlipGPU::RunImpl(workspace_t<GPUBackend> &ws) {
auto &input = ws.Input<GPUBackend>(0);
const auto &shape = input.shape();
auto nsamples = shape.num_samples();
auto &output = ws.Output<GPUBackend>(0);
DALI_ENFORCE(IsType<float>(input.type()),
make_string("Expected input data as float; got ", input.type()));
DALI_ENFORCE(input._num_elements() % 4 == 0,
make_string("Input data size must be a multiple of 4 if it contains bounding",
" boxes; got ", input._num_elements()));
for (int sample = 0; sample < nsamples; sample++) {
auto dim = shape[sample].sample_dim();
DALI_ENFORCE(dim < 2 || shape[sample][dim - 1] == 4,
"If bounding box tensor is >= 2D, innermost dimension must be 4");
DALI_ENFORCE(dim > 1 || shape[sample][0] % 4 == 0,
"Flat representation of bounding boxes must have size divisible by 4");
}
TensorListShape<2> strong_shape = GetNormalizedShape(shape);
block_setup_.SetupBlocks(strong_shape, true);
blocks_dev_.from_host(block_setup_.Blocks(), ws.stream());
samples_.resize(nsamples);
auto stream = ws.stream();
const auto num_boxes = input._num_elements() / 4;
if (num_boxes == 0) {
return;
}
for (int sample_idx = 0; sample_idx < nsamples; sample_idx++) {
samples_[sample_idx].output = output.mutable_tensor<float>(sample_idx);
samples_[sample_idx].input = input.tensor<float>(sample_idx);
samples_[sample_idx].horz = horz_[sample_idx].data[0];
samples_[sample_idx].vert = vert_[sample_idx].data[0];
}
samples_dev_.from_host(samples_, ws.stream());
dim3 grid = block_setup_.GridDim();
dim3 block = block_setup_.BlockDim();
if (ltrb_) {
hipLaunchKernelGGL(( BbFlipKernel<true>), dim3(grid), dim3(block), 0, stream, samples_dev_.data(), blocks_dev_.data());
} else {
hipLaunchKernelGGL(( BbFlipKernel<false>), dim3(grid), dim3(block), 0, stream, samples_dev_.data(), blocks_dev_.data());
}
CUDA_CALL(hipGetLastError());
}
DALI_REGISTER_OPERATOR(BbFlip, BbFlipGPU, GPU);
} // namespace dali
| 54668bc6a88f329552667ee49d47e9afcc3c545b.cu | // Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <utility>
#include <vector>
#include "dali/core/format.h"
#include "dali/operators/bbox/bb_flip.cuh"
namespace dali {
/**
* @param samples - Sample description (input/output pointer + flipping configuration)
* @param blocks - Mapping the current CUDA block to range within particular sample
*/
template <bool ltrb>
__global__ void BbFlipKernel(const BbFlipSampleDesc *samples, const kernels::BlockDesc<1> *blocks) {
const auto &block = blocks[blockIdx.x];
const auto &sample = samples[block.sample_idx];
for (int idx = threadIdx.x + block.start.x; idx < block.end.x; idx += blockDim.x) {
bool h = sample.horz;
bool v = sample.vert;
const auto *in = &sample.input[4 * idx];
auto *out = &sample.output[4 * idx];
if (ltrb) {
out[0] = h ? 1.0f - in[2] : in[0];
out[1] = v ? 1.0f - in[3] : in[1];
out[2] = h ? 1.0f - in[0] : in[2];
out[3] = v ? 1.0f - in[1] : in[3];
} else {
// No range checking required if the parenthesis is respected in the two lines below.
// If the original bounding box satisfies the condition that x + w <= 1.0f, then the
// expression 1.0f - (x + w) is guaranteed to yield a non-negative result. QED.
out[0] = h ? 1.0f - (in[0] + in[2]) : in[0];
out[1] = v ? 1.0f - (in[1] + in[3]) : in[1];
out[2] = in[2]; // width and
out[3] = in[3]; // height remain unaffected
}
}
}
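// Normalizes any supported box-tensor shape to (num_boxes, 4): 2D shapes pass through,
// higher-rank shapes collapse their outer dimensions, and flat 1D shapes are split into groups of 4.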
TensorListShape<2> GetNormalizedShape(const TensorListShape<-1> &shape) {
if (shape.sample_dim() == 2) {
return shape.to_static<2>();
}
if (shape.sample_dim() > 2) {
std::array<std::pair<int, int>, 1> collapse_group = {{{0, shape.sample_dim() - 1}}};
return collapse_dims<2>(shape, collapse_group);
}
TensorListShape<2> result(shape.num_samples(), 2);
for (int i = 0; i < shape.num_samples(); i++) {
auto tspan = shape.tensor_shape_span(i);
result.set_tensor_shape(i, {tspan[0] / 4, 4});
}
return result;
}
void BbFlipGPU::RunImpl(workspace_t<GPUBackend> &ws) {
auto &input = ws.Input<GPUBackend>(0);
const auto &shape = input.shape();
auto nsamples = shape.num_samples();
auto &output = ws.Output<GPUBackend>(0);
DALI_ENFORCE(IsType<float>(input.type()),
make_string("Expected input data as float; got ", input.type()));
DALI_ENFORCE(input._num_elements() % 4 == 0,
make_string("Input data size must be a multiple of 4 if it contains bounding",
" boxes; got ", input._num_elements()));
for (int sample = 0; sample < nsamples; sample++) {
auto dim = shape[sample].sample_dim();
DALI_ENFORCE(dim < 2 || shape[sample][dim - 1] == 4,
"If bounding box tensor is >= 2D, innermost dimension must be 4");
DALI_ENFORCE(dim > 1 || shape[sample][0] % 4 == 0,
"Flat representation of bounding boxes must have size divisible by 4");
}
TensorListShape<2> strong_shape = GetNormalizedShape(shape);
block_setup_.SetupBlocks(strong_shape, true);
blocks_dev_.from_host(block_setup_.Blocks(), ws.stream());
samples_.resize(nsamples);
auto stream = ws.stream();
const auto num_boxes = input._num_elements() / 4;
if (num_boxes == 0) {
return;
}
for (int sample_idx = 0; sample_idx < nsamples; sample_idx++) {
samples_[sample_idx].output = output.mutable_tensor<float>(sample_idx);
samples_[sample_idx].input = input.tensor<float>(sample_idx);
samples_[sample_idx].horz = horz_[sample_idx].data[0];
samples_[sample_idx].vert = vert_[sample_idx].data[0];
}
samples_dev_.from_host(samples_, ws.stream());
dim3 grid = block_setup_.GridDim();
dim3 block = block_setup_.BlockDim();
if (ltrb_) {
BbFlipKernel<true><<<grid, block, 0, stream>>>(samples_dev_.data(), blocks_dev_.data());
} else {
BbFlipKernel<false><<<grid, block, 0, stream>>>(samples_dev_.data(), blocks_dev_.data());
}
CUDA_CALL(cudaGetLastError());
}
DALI_REGISTER_OPERATOR(BbFlip, BbFlipGPU, GPU);
} // namespace dali
|
3b8ff51c51f7973b33a742e19ecf67b8b8a6e2c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "softmax_focal_loss_op.h"
namespace caffe2 {
namespace {
__global__ void SpatialSoftmaxKernel(const int N, const int A,
const int H, const int W, const float* Xdata, float* Pdata,
const int num_classes) {
CUDA_1D_KERNEL_LOOP(index, N * A * H * W) {
int D = num_classes * A;
int x = index % W;
int y = (index / W) % H;
int a = (index / (W * H)) % A;
int i = index / W / H / A;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
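// Per-location focal loss: loss = -z * (1 - p_label)^gamma * log(p_label), where z folds in the
// alpha class weighting (alpha for foreground labels, 1 - alpha for background) and the
// normalization by the foreground count Np.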
__global__ void SoftmaxFocalLossKernel(
const int N, const int A, const int H, const int W,
const float* Pdata, const int* targets, float* losses,
const float* weight_pos, const float gamma, const float alpha,
const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * A * H * W) {
int D = A * num_classes;
int x = i % W;
int y = (i / W) % H;
int a = (i / (W * H)) % A;
int n = i / (W * H * A);
const int label = static_cast<int>(targets[i]);
float Np = max(weight_pos[0], 1.0);
float z = (label == 0) * (1 - alpha) / Np +
(label >= 1) * alpha / Np;
losses[i] = 0.0;
if (label >= 0) {
int offset = a * num_classes;
int idx = n * (H * W * D) + (offset + label) * (H * W) + y * W + x;
losses[i] =
-(pow(1.0f - Pdata[idx], gamma) *
log(max(Pdata[idx], FLT_MIN))) * z;
}
}
}
__global__ void SoftmaxFocalLossGradientWeightKernel(
const int N, const int A, const int H, const int W,
const float* Pdata, const int* targets, float* buff,
const float* weight_pos, const float gamma, const float alpha,
const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * A * H * W) {
int D = A * num_classes;
int x = i % W;
int y = (i / W) % H;
int a = (i / (W * H)) % A;
int n = i / (W * H * A);
const int label = static_cast<int>(targets[i]);
float Np = max(weight_pos[0], 1.0);
float z = (label == 0) * (1 - alpha) / Np +
(label >= 1) * alpha / Np;
buff[i] = 0.0;
if (label >= 0) {
int offset = a * num_classes;
int idx = n * (H * W * D) + (offset + label) * (H * W) + y * W + x;
float onemp = 1. - Pdata[idx];
float p = Pdata[idx];
buff[i] =
(-pow(onemp, gamma) +
gamma * pow(onemp, gamma - 1) * p * log(max(p, FLT_MIN))) * z;
}
}
}
__global__ void SoftmaxFocalLossGradientKernel(
const int N, const int D, const int H, const int W,
const float* Pdata, const int* targets, const float* buff,
const float* d_loss_data, float* dX, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * D * H * W) {
int A = D / num_classes;
int x = i % W;
int y = (i / W) % H;
int d = (i / (W * H)) % D;
int a = d / num_classes;
int c = d % num_classes;
int n = i / (W * H * D);
float d_loss = *d_loss_data;
int ind = n * (H * W * A) + a * (H * W) + y * W + x;
const int label = static_cast<int>(targets[ind]);
float c1 = (label >= 0) * 1.0;
float c2 = (label == c) * 1.0;
dX[i] = 0.0;
dX[i] = c1 * d_loss * buff[ind] * (c2 - Pdata[i]);
}
}
} // namespace
template <>
bool SoftmaxFocalLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels
auto& wp = Input(2); // num of foregound
// average loss as output
// softmax probability, going to be re-used in gradient
int N = X.dim32(0);
int D = X.dim32(1);
int H = X.dim32(2);
int W = X.dim32(3);
int A = D / num_classes_;
ReinitializeTensor(&losses_, {N * A * H * W}, at::dtype<float>().device(CUDA));
auto* P = Output(1, {N * D * H * W}, at::dtype<float>());
auto* avg_loss = Output(0, vector<int64_t>(), at::dtype<float>());
math::Set<float, CUDAContext>(
avg_loss->size(), 0.f, avg_loss->mutable_data<float>(), &context_);
math::Set<float, CUDAContext>(
P->size(), 0.f, P->mutable_data<float>(), &context_);
math::Set<float, CUDAContext>(
losses_.size(), 0.f, losses_.mutable_data<float>(), &context_);
DCHECK_EQ(X.ndim(), 4);
const float* Xdata = X.data<float>();
const float* Wdata = wp.data<float>();
// Spatial Softmax Kernel
hipLaunchKernelGGL(( SpatialSoftmaxKernel)
, dim3(CAFFE_GET_BLOCKS(N * A * H * W)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, A, H, W, Xdata, P->mutable_data<float>(), num_classes_);
// Compute loss for each x,y location
const int* Tdata = T.data<int>();
hipLaunchKernelGGL(( SoftmaxFocalLossKernel)
, dim3(CAFFE_GET_BLOCKS(N * A * H * W)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, A, H, W, P->data<float>(), Tdata, losses_.mutable_data<float>(),
Wdata, gamma_, alpha_, num_classes_);
// sum the losses
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
math::Scale<float, float, CUDAContext>(
1, scale_, avg_loss_data, avg_loss_data, &context_);
return true;
}
template<>
bool SoftmaxFocalLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Label
auto& wp = Input(2); // num of foreground example
auto& P = Input(3); // Softmax Probability
auto& d_avg_loss = Input(4);
auto* dX = Output(0); // gradient wrt logits
int N = X.dim32(0);
int D = X.dim32(1);
int H = X.dim32(2);
int W = X.dim32(3);
int A = D / num_classes_;
ReinitializeTensor(&buff_, {N * A * H * W}, at::dtype<float>().device(CUDA));
dX->ResizeLike(X);
const float* Xdata = X.data<float>();
const int* Tdata = T.data<int>();
const float* Pdata = P.data<float>();
const float* Wdata = wp.data<float>();
// Compute the weight for gradients
hipLaunchKernelGGL(( SoftmaxFocalLossGradientWeightKernel)
, dim3(CAFFE_GET_BLOCKS(N * A * H * W)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, A, H, W, Pdata, Tdata, buff_.mutable_data<float>(),
Wdata, gamma_, alpha_, num_classes_);
// Compute the gradient with the weights
const float* Bdata = buff_.data<float>();
hipLaunchKernelGGL(( SoftmaxFocalLossGradientKernel)
, dim3(CAFFE_GET_BLOCKS(N * D * H * W)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, D, H, W, Pdata, Tdata, Bdata, d_avg_loss.data<float>(),
dX->mutable_data<float>(), num_classes_);
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxFocalLoss,
SoftmaxFocalLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxFocalLossGradient,
SoftmaxFocalLossGradientOp<float, CUDAContext>);
} // namespace caffe2
| 3b8ff51c51f7973b33a742e19ecf67b8b8a6e2c1.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "softmax_focal_loss_op.h"
namespace caffe2 {
namespace {
__global__ void SpatialSoftmaxKernel(const int N, const int A,
const int H, const int W, const float* Xdata, float* Pdata,
const int num_classes) {
CUDA_1D_KERNEL_LOOP(index, N * A * H * W) {
int D = num_classes * A;
int x = index % W;
int y = (index / W) % H;
int a = (index / (W * H)) % A;
int i = index / W / H / A;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
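// Illustrative note (not part of the original file):
// The kernel above uses the standard max-subtraction trick: softmax(x)_c =
// exp(x_c - m) / sum_k exp(x_k - m) with m = max_k x_k, which equals the naive
// form exactly but keeps exp() from overflowing. A host-side reference for one
// cell might look like the sketch below; the helper name and the
// contiguous-layout assumption are illustrative only.
inline void ReferenceSoftmaxCell(const float* x, float* y, int num_classes) {
  float m = x[0];
  for (int c = 1; c < num_classes; ++c) m = x[c] > m ? x[c] : m;
  float expsum = 0.f;
  for (int c = 0; c < num_classes; ++c) {
    y[c] = expf(x[c] - m);
    expsum += y[c];
  }
  for (int c = 0; c < num_classes; ++c) y[c] /= expsum;
}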
__global__ void SoftmaxFocalLossKernel(
const int N, const int A, const int H, const int W,
const float* Pdata, const int* targets, float* losses,
const float* weight_pos, const float gamma, const float alpha,
const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * A * H * W) {
int D = A * num_classes;
int x = i % W;
int y = (i / W) % H;
int a = (i / (W * H)) % A;
int n = i / (W * H * A);
const int label = static_cast<int>(targets[i]);
float Np = max(weight_pos[0], 1.0);
float z = (label == 0) * (1 - alpha) / Np +
(label >= 1) * alpha / Np;
losses[i] = 0.0;
if (label >= 0) {
int offset = a * num_classes;
int idx = n * (H * W * D) + (offset + label) * (H * W) + y * W + x;
losses[i] =
-(pow(1.0f - Pdata[idx], gamma) *
log(max(Pdata[idx], FLT_MIN))) * z;
}
}
}
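// Illustrative note (not part of the original file):
// Per location the kernel above evaluates the focal loss
//   FL(p_t) = -z * (1 - p_t)^gamma * log(p_t),
// where p_t is the softmax probability of the target class and z folds the
// alpha class weighting together with the 1/Np normalisation. The same rule
// for one scalar (FocalLossScalar is a hypothetical helper, not operator code):
inline float FocalLossScalar(float p_t, float gamma, float z) {
  p_t = p_t < FLT_MIN ? FLT_MIN : p_t;  // same clamp as max(Pdata[idx], FLT_MIN)
  return -z * powf(1.0f - p_t, gamma) * logf(p_t);
}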
__global__ void SoftmaxFocalLossGradientWeightKernel(
const int N, const int A, const int H, const int W,
const float* Pdata, const int* targets, float* buff,
const float* weight_pos, const float gamma, const float alpha,
const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * A * H * W) {
int D = A * num_classes;
int x = i % W;
int y = (i / W) % H;
int a = (i / (W * H)) % A;
int n = i / (W * H * A);
const int label = static_cast<int>(targets[i]);
float Np = max(weight_pos[0], 1.0);
float z = (label == 0) * (1 - alpha) / Np +
(label >= 1) * alpha / Np;
buff[i] = 0.0;
if (label >= 0) {
int offset = a * num_classes;
int idx = n * (H * W * D) + (offset + label) * (H * W) + y * W + x;
float onemp = 1. - Pdata[idx];
float p = Pdata[idx];
buff[i] =
(-pow(onemp, gamma) +
gamma * pow(onemp, gamma - 1) * p * log(max(p, FLT_MIN))) * z;
}
}
}
__global__ void SoftmaxFocalLossGradientKernel(
const int N, const int D, const int H, const int W,
const float* Pdata, const int* targets, const float* buff,
const float* d_loss_data, float* dX, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * D * H * W) {
int A = D / num_classes;
int x = i % W;
int y = (i / W) % H;
int d = (i / (W * H)) % D;
int a = d / num_classes;
int c = d % num_classes;
int n = i / (W * H * D);
float d_loss = *d_loss_data;
int ind = n * (H * W * A) + a * (H * W) + y * W + x;
const int label = static_cast<int>(targets[ind]);
float c1 = (label >= 0) * 1.0;
float c2 = (label == c) * 1.0;
dX[i] = 0.0;
dX[i] = c1 * d_loss * buff[ind] * (c2 - Pdata[i]);
}
}
} // namespace
template <>
bool SoftmaxFocalLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels
  auto& wp = Input(2); // num of foreground
// average loss as output
// softmax probability, going to be re-used in gradient
int N = X.dim32(0);
int D = X.dim32(1);
int H = X.dim32(2);
int W = X.dim32(3);
int A = D / num_classes_;
ReinitializeTensor(&losses_, {N * A * H * W}, at::dtype<float>().device(CUDA));
auto* P = Output(1, {N * D * H * W}, at::dtype<float>());
auto* avg_loss = Output(0, vector<int64_t>(), at::dtype<float>());
math::Set<float, CUDAContext>(
avg_loss->size(), 0.f, avg_loss->mutable_data<float>(), &context_);
math::Set<float, CUDAContext>(
P->size(), 0.f, P->mutable_data<float>(), &context_);
math::Set<float, CUDAContext>(
losses_.size(), 0.f, losses_.mutable_data<float>(), &context_);
DCHECK_EQ(X.ndim(), 4);
const float* Xdata = X.data<float>();
const float* Wdata = wp.data<float>();
// Spatial Softmax Kernel
SpatialSoftmaxKernel
<<<CAFFE_GET_BLOCKS(N * A * H * W), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, A, H, W, Xdata, P->mutable_data<float>(), num_classes_);
// Compute loss for each x,y location
const int* Tdata = T.data<int>();
SoftmaxFocalLossKernel
<<<CAFFE_GET_BLOCKS(N * A * H * W), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, A, H, W, P->data<float>(), Tdata, losses_.mutable_data<float>(),
Wdata, gamma_, alpha_, num_classes_);
// sum the losses
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_);
math::Scale<float, float, CUDAContext>(
1, scale_, avg_loss_data, avg_loss_data, &context_);
return true;
}
template<>
bool SoftmaxFocalLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Label
auto& wp = Input(2); // num of foreground example
auto& P = Input(3); // Softmax Probability
auto& d_avg_loss = Input(4);
auto* dX = Output(0); // gradient wrt logits
int N = X.dim32(0);
int D = X.dim32(1);
int H = X.dim32(2);
int W = X.dim32(3);
int A = D / num_classes_;
ReinitializeTensor(&buff_, {N * A * H * W}, at::dtype<float>().device(CUDA));
dX->ResizeLike(X);
const float* Xdata = X.data<float>();
const int* Tdata = T.data<int>();
const float* Pdata = P.data<float>();
const float* Wdata = wp.data<float>();
// Compute the weight for gradients
SoftmaxFocalLossGradientWeightKernel
<<<CAFFE_GET_BLOCKS(N * A * H * W), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, A, H, W, Pdata, Tdata, buff_.mutable_data<float>(),
Wdata, gamma_, alpha_, num_classes_);
// Compute the gradient with the weights
const float* Bdata = buff_.data<float>();
SoftmaxFocalLossGradientKernel
<<<CAFFE_GET_BLOCKS(N * D * H * W), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, D, H, W, Pdata, Tdata, Bdata, d_avg_loss.data<float>(),
dX->mutable_data<float>(), num_classes_);
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxFocalLoss,
SoftmaxFocalLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxFocalLossGradient,
SoftmaxFocalLossGradientOp<float, CUDAContext>);
} // namespace caffe2
|
fbe5e23e40f294e1e979b31ce7a5b0db665c649e.hip | // !!! This is a file automatically generated by hipify!!!
#define ONLY_IMPORTANT
#include"my_functions.h"
#include "./CUCCL_LE/CUCCL_LE.cuh"
#include "./CUCCL_NP/CUCCL_NP.cuh"
#include "./CUCCL_DPL/CUCCL_DPL.cuh"
#include <iomanip>
#include <iostream>
#include <vector>
#include <stdlib.h>
using namespace std;
using namespace CUCCL;
void testCCL(char const* flag, int *data, const int width, const int height, int degreeOfConnectivity, int threshold, int *labels)
{
#ifndef ONLY_IMPORTANT
// const auto width = 32;
// const auto height = 8;
// unsigned char data[width * height] =
// {
// 135, 135, 240, 240, 240, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 135, 135, 135, 135, 135, 120, 120,
// 135, 135, 240, 240, 240, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 135, 135, 135, 135, 135, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 135, 135, 135, 135, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 135, 135, 135, 120, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120
// };
cout << "Binary image is : " <<endl;
for (auto i = 0; i < height; i++)
{
for (auto j = 0; j < width; j++)
{
cout << setw(3) << static_cast<int>(data[i * width + j]) << " ";
}
cout << endl;
}
cout<<endl;
#endif // !ONLY_IMPORTANT
if (flag == (std::string)"LE")
{
CCLLEGPU ccl;
ccl.CudaCCL(data, labels, width, height, degreeOfConnectivity, threshold);
#ifndef ONLY_IMPORTANT
cout << "Label Mesh by CCL LE : " << endl;
for (auto i = 0; i < height; i++)
{
for (auto j = 0; j < width; j++)
{
        cout << setw(3) << labels[i * width + j] << " ";
      }
      cout << endl;
}
#endif // !ONLY_IMPORTANT
}
if (flag == (std::string)"NP")
{
CCLNPGPU cclnp;
cclnp.CudaCCL(data, labels, width, height, degreeOfConnectivity, threshold);
#ifndef ONLY_IMPORTANT
cout << "Label Mesh by CCL NP : " << endl;
for (auto i = 0; i < height; i++)
{
for (auto j = 0; j < width; j++)
{
cout << setw(3) << labels[i * width + j] << " ";
}
cout << endl;
}
#endif // !ONLY_IMPORTANT
}
if (flag == (std::string)"DPL")
{
CCLDPLGPU ccldpl;
ccldpl.CudaCCL(data, labels, width, height, degreeOfConnectivity, threshold);
#ifndef ONLY_IMPORTANT
cout << "Label Mesh by CCL DPL : " << endl;
for (auto i = 0; i < height; i++)
{
for (auto j = 0; j < width; j++)
{
cout << setw(3) << labels[i * width + j] << " ";
}
cout << endl;
}
#endif // !ONLY_IMPORTANT
}
}
int main(int argc, char **args)
{
if (argc != 6) {
std::cerr << "Number of arguments is invalid" << std::endl;
exit(EXIT_FAILURE);
}
if (args[4][0] != '4' && args[4][0] != '8') {
std::cerr << "Connectivity (4. argument) not set correctly" << std::endl;
exit(EXIT_FAILURE);
}
int connectivity = args[4][0] - '0';
int threshold = atoi(args[5]);
if (args[5][0] < '0' || args[5][0] > '9' || threshold < 0) {
std::cerr << "Threshold (5. argument) not set correctly" << std::endl;
exit(EXIT_FAILURE);
}
int *img_map;
int width, height;
load_image(args[1], &img_map, &width, &height);
int *labels = new int[width * height];
if (labels == NULL) {
std::cerr << "Label array allocation failed" << std::endl;
exit(EXIT_FAILURE);
}
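  // Note: hipFree(0) forces lazy runtime/context initialisation up front, so
  // the Timer block below measures only the labelling work rather than
  // first-call startup cost.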
hipFree(0);
{
Timer my_time(std::string(args[3]) + ": In total ");
testCCL(args[3], img_map, width, height, connectivity, threshold, labels);
}
std::cout << "Number of final segments " << count_segments(labels, width, height) << ".\n";
save_labels(labels, width, height, args[2]);
std::cout << "DONE" << std::endl;
delete[] img_map;
delete[] labels;
}
| fbe5e23e40f294e1e979b31ce7a5b0db665c649e.cu |
#define ONLY_IMPORTANT
#include"my_functions.h"
#include "./CUCCL_LE/CUCCL_LE.cuh"
#include "./CUCCL_NP/CUCCL_NP.cuh"
#include "./CUCCL_DPL/CUCCL_DPL.cuh"
#include <iomanip>
#include <iostream>
#include <vector>
#include <stdlib.h>
using namespace std;
using namespace CUCCL;
void testCCL(char const* flag, int *data, const int width, const int height, int degreeOfConnectivity, int threshold, int *labels)
{
#ifndef ONLY_IMPORTANT
// const auto width = 32;
// const auto height = 8;
// unsigned char data[width * height] =
// {
// 135, 135, 240, 240, 240, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 135, 135, 135, 135, 135, 120, 120,
// 135, 135, 240, 240, 240, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 135, 135, 135, 135, 135, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 135, 135, 135, 135, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 135, 135, 135, 120, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120,
// 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120
// };
cout << "Binary image is : " <<endl;
for (auto i = 0; i < height; i++)
{
for (auto j = 0; j < width; j++)
{
cout << setw(3) << static_cast<int>(data[i * width + j]) << " ";
}
cout << endl;
}
cout<<endl;
#endif // !ONLY_IMPORTANT
if (flag == (std::string)"LE")
{
CCLLEGPU ccl;
ccl.CudaCCL(data, labels, width, height, degreeOfConnectivity, threshold);
#ifndef ONLY_IMPORTANT
cout << "Label Mesh by CCL LE : " << endl;
for (auto i = 0; i < height; i++)
{
for (auto j = 0; j < width; j++)
{
        cout << setw(3) << labels[i * width + j] << " ";
      }
      cout << endl;
}
#endif // !ONLY_IMPORTANT
}
if (flag == (std::string)"NP")
{
CCLNPGPU cclnp;
cclnp.CudaCCL(data, labels, width, height, degreeOfConnectivity, threshold);
#ifndef ONLY_IMPORTANT
cout << "Label Mesh by CCL NP : " << endl;
for (auto i = 0; i < height; i++)
{
for (auto j = 0; j < width; j++)
{
cout << setw(3) << labels[i * width + j] << " ";
}
cout << endl;
}
#endif // !ONLY_IMPORTANT
}
if (flag == (std::string)"DPL")
{
CCLDPLGPU ccldpl;
ccldpl.CudaCCL(data, labels, width, height, degreeOfConnectivity, threshold);
#ifndef ONLY_IMPORTANT
cout << "Label Mesh by CCL DPL : " << endl;
for (auto i = 0; i < height; i++)
{
for (auto j = 0; j < width; j++)
{
cout << setw(3) << labels[i * width + j] << " ";
}
cout << endl;
}
#endif // !ONLY_IMPORTANT
}
}
int main(int argc, char **args)
{
if (argc != 6) {
std::cerr << "Number of arguments is invalid" << std::endl;
exit(EXIT_FAILURE);
}
if (args[4][0] != '4' && args[4][0] != '8') {
std::cerr << "Connectivity (4. argument) not set correctly" << std::endl;
exit(EXIT_FAILURE);
}
int connectivity = args[4][0] - '0';
int threshold = atoi(args[5]);
if (args[5][0] < '0' || args[5][0] > '9' || threshold < 0) {
std::cerr << "Threshold (5. argument) not set correctly" << std::endl;
exit(EXIT_FAILURE);
}
int *img_map;
int width, height;
load_image(args[1], &img_map, &width, &height);
int *labels = new int[width * height];
if (labels == NULL) {
std::cerr << "Label array allocation failed" << std::endl;
exit(EXIT_FAILURE);
}
cudaFree(0);
{
Timer my_time(std::string(args[3]) + ": In total ");
testCCL(args[3], img_map, width, height, connectivity, threshold, labels);
}
std::cout << "Number of final segments " << count_segments(labels, width, height) << ".\n";
save_labels(labels, width, height, args[2]);
std::cout << "DONE" << std::endl;
delete[] img_map;
delete[] labels;
}
|
5e5d3861ac4c1af8a127bc4157b05177cb3d34e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file psroi_pooling.cu
* \brief psroi pooling operator
* \author Yi Li, Tairui Chen, Guodong Zhang, Haozhi Qi, Jifeng Dai
*/
#include "./psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda_utils.h"
#include "../mxnet_op.h"
#define PSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
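// Illustrative note (not part of the original file):
// The macro above is the usual error-check wrapper: evaluate the runtime call
// once, compare against hipSuccess, and fail loudly with the decoded message;
// the do { ... } while (0) keeps it safe inside unbraced if/else. The same
// pattern as a plain function (PsroiCheck is a hypothetical name):
inline void PsroiCheck(hipError_t error) {
  CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error);
}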
namespace mshadow {
namespace cuda {
template <typename DType>
__global__ void PSROIPoolForwardKernel(
const int count,
const DType* bottom_data,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const DType* bottom_rois,
const int output_dim,
const int group_size,
DType* top_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = floor(static_cast<DType>(pw)* group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
int c = (ctop*group_size + gh)*group_size + gw;
const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
DType out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
out_sum += offset_bottom_data[bottom_index];
}
}
DType bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty? (DType)0. : out_sum/bin_area;
}
}
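// Illustrative note (not part of the original file):
// "Position-sensitive" means output channel ctop reads a different input
// channel for every (gh, gw) bin of the group_size x group_size grid, laid out
// as c = (ctop * group_size + gh) * group_size + gw, so each bin averages its
// own dedicated score map. The mapping in isolation (PSChannelIndex is an
// illustrative helper, not mxnet API):
template <typename Int>
__host__ __device__ inline Int PSChannelIndex(Int ctop, Int gh, Int gw, Int group_size) {
  return (ctop * group_size + gh) * group_size + gw;
}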
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *top_data = out.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data);
PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError());
}
template <typename DType>
__global__ void PSROIPoolBackwardAccKernel(
const int count,
const DType* top_diff,
const int num_rois,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int group_size,
const int output_dim,
DType* bottom_diff,
const DType* bottom_rois) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int gw = floor(static_cast<DType>(pw)* group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
int c = (ctop*group_size + gh)*group_size + gw;
DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
DType bin_area = (hend - hstart)*(wend - wstart);
DType diff_val = is_empty ? (DType)0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
// LOG(INFO) << "PSROIPoolBackward";
const DType *top_diff = out_grad.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *bottom_diff = in_grad.dptr_;
const int count = out_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, top_diff, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, group_size_, output_dim_, bottom_diff, bottom_rois);
PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError());
}
} // namespace cuda
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
cuda::PSROIPoolForward(out, data, bbox, spatial_scale, output_dim_, group_size_);
}
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, spatial_scale, output_dim_, group_size_);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) {
Operator* op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new PSROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| 5e5d3861ac4c1af8a127bc4157b05177cb3d34e7.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file psroi_pooling.cu
* \brief psroi pooling operator
* \author Yi Li, Tairui Chen, Guodong Zhang, Haozhi Qi, Jifeng Dai
*/
#include "./psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda_utils.h"
#include "../mxnet_op.h"
#define PSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
template <typename DType>
__global__ void PSROIPoolForwardKernel(
const int count,
const DType* bottom_data,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const DType* bottom_rois,
const int output_dim,
const int group_size,
DType* top_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph) * bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = floor(static_cast<DType>(pw)* group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
int c = (ctop*group_size + gh)*group_size + gw;
const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
DType out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
out_sum += offset_bottom_data[bottom_index];
}
}
DType bin_area = (hend - hstart)*(wend - wstart);
top_data[index] = is_empty? (DType)0. : out_sum/bin_area;
}
}
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *top_data = out.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = out.size(2);
const int pooled_width = out.size(3);
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, bottom_data, spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data);
PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError());
}
template <typename DType>
__global__ void PSROIPoolBackwardAccKernel(
const int count,
const DType* top_diff,
const int num_rois,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int group_size,
const int output_dim,
DType* bottom_diff,
const DType* bottom_rois) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
int hstart = floor(static_cast<DType>(ph)* bin_size_h
+ roi_start_h);
int wstart = floor(static_cast<DType>(pw)* bin_size_w
+ roi_start_w);
int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h
+ roi_start_h);
int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w
+ roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int gw = floor(static_cast<DType>(pw)* group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
int c = (ctop*group_size + gh)*group_size + gw;
DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
DType bin_area = (hend - hstart)*(wend - wstart);
DType diff_val = is_empty ? (DType)0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h*width + w;
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
// LOG(INFO) << "PSROIPoolBackward";
const DType *top_diff = out_grad.dptr_;
const DType *bottom_rois = bbox.dptr_;
DType *bottom_diff = in_grad.dptr_;
const int count = out_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = out_grad.size(2);
const int pooled_width = out_grad.size(3);
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, top_diff, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, group_size_, output_dim_, bottom_diff, bottom_rois);
PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError());
}
} // namespace cuda
template<typename DType>
inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
cuda::PSROIPoolForward(out, data, bbox, spatial_scale, output_dim_, group_size_);
}
template<typename DType>
inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 2, DType> &bbox,
const float spatial_scale,
const int output_dim_,
const int group_size_) {
cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, spatial_scale, output_dim_, group_size_);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) {
Operator* op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new PSROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
7ed56cfddd0d82c5886cb3311f8c288f0647fd9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernels/tensor_operators.h"
#include "training/graph_group_sync.h"
namespace marian {
void SyncGraphGroup::setScheduler(Ptr<Scheduler> scheduler) {
scheduler_ = scheduler;
// optimizer has to be registered last to see changes of learning rate
scheduler_->registerTrainingObserver(scheduler_);
for(auto opt : shardOpt_)
scheduler_->registerTrainingObserver(opt);
}
void SyncGraphGroup::updateMovingAverage(Tensor paramsAvg,
Tensor params,
size_t batches) {
using namespace functional;
float decay = ::max(mvDecay_, 1.f - (float)(batches + 1) / (float)(batches + 10));
Element(_1 = ((1.f - decay) * _1) + (decay * _2), paramsAvg, params);
}
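// Illustrative note (not part of the original file):
// The element-wise update above is an exponential moving average,
// paramsAvg = (1 - decay) * paramsAvg + decay * params, whose decay is warmed
// up: max(mvDecay_, 1 - (t + 1) / (t + 10)) starts near 0.9 so the average
// tracks the first updates closely, then falls toward mvDecay_ so later
// updates only nudge it. The same rule for one scalar (illustrative name):
inline float movingAverageStep(float avg, float param, float decay) {
  return (1.f - decay) * avg + decay * param;
}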
void SyncGraphGroup::fetchParams(Tensor oldParams,
const std::vector<Tensor>& params) {
// @TODO read guard on parameters
int pos = 0;
std::vector<std::thread> threads;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[=](int idx, int pos) {
oldParams->subtensor(pos, params[idx]->size())->copyFrom(params[idx]);
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads) {
t.join();
}
}
void SyncGraphGroup::execute(Ptr<data::Batch> batch) {
std::vector<Ptr<data::Batch>> batches = batch->split(devices_.size());
if(first_) {
for(size_t i = 0; i < graphs_.size(); ++i) {
      // takes care of thread_local stuff
THREAD_GUARD(builders_[i]->build(graphs_[i], batches[0]);
graphs_[i]->forward(););
if(i > 0)
graphs_[i]->params()->vals()->copyFrom(graphs_[0]->params()->vals());
}
if(params_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
shardSize_ = ceil(totalSize / (float)devices_.size());
int pos = 0;
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
auto paramsAlloc = New<TensorAllocator>(device);
paramsAllocs_.push_back(paramsAlloc);
paramsAlloc->reserveExact(3 * __size__ * sizeof(float));
Tensor param, grad, tmp;
paramsAlloc->allocate(param, {1, __size__});
paramsAlloc->allocate(grad, {1, __size__});
paramsAlloc->allocate(tmp, {1, __size__});
params_.push_back(param);
grads_.push_back(grad);
tmpTensors_.push_back(tmp);
param->copyFrom(graphs_[0]->params()->vals()->subtensor(pos, __size__));
pos += __size__;
totalSize -= __size__;
}
}
if(movingAvg_ && paramsAvg_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
int i = 0;
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor paramAvg;
auto allocator = New<TensorAllocator>(device);
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(paramAvg, {1, __size__});
paramAvg->copyFrom(params_[i++]);
paramsAllocAvg_.push_back(allocator);
paramsAvg_.push_back(paramAvg);
}
}
first_ = false;
}
std::vector<float> costs(devices_.size());
{
auto task = [this, &costs, batches](size_t idx) {
auto graph = graphs_[idx];
auto batch = batches[idx];
if(batch->size() > 0) {
auto costNode = builders_[idx]->build(graph, batch);
graph->forward();
costs[idx] = costNode->scalar();
graph->backward();
}
};
ThreadPool pool(devices_.size(), devices_.size());
for(int idx = 0; idx < batches.size(); ++idx)
pool.enqueue(task, idx);
}
{
auto task = [this, batches](size_t idx, int pos) {
grads_[idx]->set(0);
int size = params_[idx]->size();
int i = 0;
float div = devices_.size(); // no. of GPUs
for(auto graph : graphs_) {
if(batches[i]->size() > 0) {
auto subGrad = graph->params()->grads()->subtensor(pos, size);
tmpTensors_[idx]->copyFrom(subGrad);
using namespace functional;
Element(_1 = _1 + (_2 / div), grads_[idx], tmpTensors_[idx]);
}
i++;
}
shardOpt_[idx]->update(params_[idx], grads_[idx]);
if(movingAvg_)
updateMovingAverage(
paramsAvg_[idx], params_[idx], scheduler_->numberOfBatches());
for(auto graph : graphs_) {
auto subParam = graph->params()->vals()->subtensor(pos, size);
subParam->copyFrom(params_[idx]);
}
};
ThreadPool pool(devices_.size(), devices_.size());
int pos = 0;
for(int idx = 0; idx < devices_.size(); ++idx) {
pool.enqueue(task, idx, pos);
pos += params_[idx]->size();
}
}
float cost = 0;
for(auto c : costs)
cost += c;
cost = cost / costs.size();
if(scheduler_) {
scheduler_->update(cost, batch);
if(scheduler_->saving()) {
this->save();
}
if(scheduler_->validating()) {
if(movingAvg_)
for(auto graph : graphs_)
fetchParams(graph->params()->vals(), paramsAvg_);
// safe, because all graphs are idle during validation with sync sgd
scheduler_->validate(graphs_);
if(movingAvg_)
for(auto graph : graphs_)
fetchParams(graph->params()->vals(), params_);
}
}
}
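// Illustrative note (not part of the original file):
// execute() above is synchronous data-parallel SGD with sharded optimizer
// state: device idx owns the parameter slice starting at idx * shardSize_,
// averages that slice of every graph's gradients, runs its own optimizer shard
// on it, and copies the updated slice back into all graphs. The slice bounds
// follow this arithmetic (illustrative helpers, not marian API):
inline int shardOffset(int idx, int shardSize) { return idx * shardSize; }
inline int shardWidth(int idx, int shardSize, int totalSize) {
  int remaining = totalSize - idx * shardSize;
  return remaining < shardSize ? remaining : shardSize;
}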
}
| 7ed56cfddd0d82c5886cb3311f8c288f0647fd9a.cu | #include "kernels/tensor_operators.h"
#include "training/graph_group_sync.h"
namespace marian {
void SyncGraphGroup::setScheduler(Ptr<Scheduler> scheduler) {
scheduler_ = scheduler;
// optimizer has to be registered last to see changes of learning rate
scheduler_->registerTrainingObserver(scheduler_);
for(auto opt : shardOpt_)
scheduler_->registerTrainingObserver(opt);
}
void SyncGraphGroup::updateMovingAverage(Tensor paramsAvg,
Tensor params,
size_t batches) {
using namespace functional;
float decay = std::max(mvDecay_, 1.f - (float)(batches + 1) / (float)(batches + 10));
Element(_1 = ((1.f - decay) * _1) + (decay * _2), paramsAvg, params);
}
void SyncGraphGroup::fetchParams(Tensor oldParams,
const std::vector<Tensor>& params) {
// @TODO read guard on parameters
int pos = 0;
std::vector<std::thread> threads;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[=](int idx, int pos) {
oldParams->subtensor(pos, params[idx]->size())->copyFrom(params[idx]);
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads) {
t.join();
}
}
void SyncGraphGroup::execute(Ptr<data::Batch> batch) {
std::vector<Ptr<data::Batch>> batches = batch->split(devices_.size());
if(first_) {
for(size_t i = 0; i < graphs_.size(); ++i) {
      // takes care of thread_local stuff
THREAD_GUARD(builders_[i]->build(graphs_[i], batches[0]);
graphs_[i]->forward(););
if(i > 0)
graphs_[i]->params()->vals()->copyFrom(graphs_[0]->params()->vals());
}
if(params_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
shardSize_ = ceil(totalSize / (float)devices_.size());
int pos = 0;
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
auto paramsAlloc = New<TensorAllocator>(device);
paramsAllocs_.push_back(paramsAlloc);
paramsAlloc->reserveExact(3 * __size__ * sizeof(float));
Tensor param, grad, tmp;
paramsAlloc->allocate(param, {1, __size__});
paramsAlloc->allocate(grad, {1, __size__});
paramsAlloc->allocate(tmp, {1, __size__});
params_.push_back(param);
grads_.push_back(grad);
tmpTensors_.push_back(tmp);
param->copyFrom(graphs_[0]->params()->vals()->subtensor(pos, __size__));
pos += __size__;
totalSize -= __size__;
}
}
if(movingAvg_ && paramsAvg_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
int i = 0;
for(auto device : devices_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor paramAvg;
auto allocator = New<TensorAllocator>(device);
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(paramAvg, {1, __size__});
paramAvg->copyFrom(params_[i++]);
paramsAllocAvg_.push_back(allocator);
paramsAvg_.push_back(paramAvg);
}
}
first_ = false;
}
std::vector<float> costs(devices_.size());
{
auto task = [this, &costs, batches](size_t idx) {
auto graph = graphs_[idx];
auto batch = batches[idx];
if(batch->size() > 0) {
auto costNode = builders_[idx]->build(graph, batch);
graph->forward();
costs[idx] = costNode->scalar();
graph->backward();
}
};
ThreadPool pool(devices_.size(), devices_.size());
for(int idx = 0; idx < batches.size(); ++idx)
pool.enqueue(task, idx);
}
{
auto task = [this, batches](size_t idx, int pos) {
grads_[idx]->set(0);
int size = params_[idx]->size();
int i = 0;
float div = devices_.size(); // no. of GPUs
for(auto graph : graphs_) {
if(batches[i]->size() > 0) {
auto subGrad = graph->params()->grads()->subtensor(pos, size);
tmpTensors_[idx]->copyFrom(subGrad);
using namespace functional;
Element(_1 = _1 + (_2 / div), grads_[idx], tmpTensors_[idx]);
}
i++;
}
shardOpt_[idx]->update(params_[idx], grads_[idx]);
if(movingAvg_)
updateMovingAverage(
paramsAvg_[idx], params_[idx], scheduler_->numberOfBatches());
for(auto graph : graphs_) {
auto subParam = graph->params()->vals()->subtensor(pos, size);
subParam->copyFrom(params_[idx]);
}
};
ThreadPool pool(devices_.size(), devices_.size());
int pos = 0;
for(int idx = 0; idx < devices_.size(); ++idx) {
pool.enqueue(task, idx, pos);
pos += params_[idx]->size();
}
}
float cost = 0;
for(auto c : costs)
cost += c;
cost = cost / costs.size();
if(scheduler_) {
scheduler_->update(cost, batch);
if(scheduler_->saving()) {
this->save();
}
if(scheduler_->validating()) {
if(movingAvg_)
for(auto graph : graphs_)
fetchParams(graph->params()->vals(), paramsAvg_);
// safe, because all graphs are idle during validation with sync sgd
scheduler_->validate(graphs_);
if(movingAvg_)
for(auto graph : graphs_)
fetchParams(graph->params()->vals(), params_);
}
}
}
}
|
48460b143f8aca0761a26de6e4c638235ff07f4d.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/rank_attention.cu.h"
#include "paddle/fluid/operators/rank_attention_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using framework::Tensor;
template <typename DeviceContext, typename T>
class RankAttentionCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<Tensor>("X");
auto *rank_offset = ctx.Input<Tensor>("RankOffset");
auto *param = ctx.Input<Tensor>("RankParam");
auto *input_help = ctx.Output<Tensor>("InputHelp");
auto *ins_rank = ctx.Output<Tensor>("InsRank");
int max_rank = ctx.Attr<int>("MaxRank");
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *Out = ctx.Output<Tensor>("Out");
// check dims
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
PADDLE_ENFORCE_EQ(
rank_offset_dims[0],
ins_num,
platform::errors::InvalidArgument("Input(RankOffset) has wrong rows."));
PADDLE_ENFORCE_EQ((rank_offset_dims[1] - 1) / 2,
max_rank,
platform::errors::InvalidArgument(
"Input(RankOffset) has wrong columns."));
PADDLE_ENFORCE_EQ(
max_rank * max_rank * x_fea_dim,
para_row,
platform::errors::InvalidArgument("Input(RankParam) has wrong rows."));
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
int max_ins = ::max(ins_num, max_size);
Tensor param_help;
param_help = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_help.mutable_data<T>(ctx.GetPlace());
input_help->Resize({max_ins, block_matrix_row});
ins_rank->Resize({max_ins, 1});
input_help->mutable_data<T>(ctx.GetPlace());
ins_rank->mutable_data<T>(ctx.GetPlace());
Out->mutable_data<T>(ctx.GetPlace());
// initialize
auto param_help_eigen = framework::EigenVector<T>::Flatten(param_help);
auto input_help_eigen = framework::EigenVector<T>::Flatten(*input_help);
auto ins_rank_eigen = framework::EigenVector<T>::Flatten(*ins_rank);
auto out_eigen = framework::EigenVector<T>::Flatten(*Out);
auto &place =
*ctx.template device_context<phi::GPUContext>().eigen_device();
param_help_eigen.device(place) =
param_help_eigen.constant(static_cast<T>(0));
input_help_eigen.device(place) =
input_help_eigen.constant(static_cast<T>(0));
ins_rank_eigen.device(place) = ins_rank_eigen.constant(static_cast<T>(-1));
out_eigen.device(place) = out_eigen.constant(static_cast<T>(0));
// get data ptr
T *input_help_data = input_help->data<T>();
T *param_help_data = param_help.data<T>();
T *ins_rank_data = ins_rank->data<T>();
T *out_data = Out->data<T>();
expand_rank_attention_input(ctx.cuda_device_context().stream(),
X->data<T>(),
ins_num,
x_fea_dim,
input_help_data,
ins_num,
block_matrix_row,
rank_offset->data<int>(),
rank_offset_dims[0],
rank_offset_dims[1],
ins_rank_data,
max_rank);
expand_rank_attention_param(ctx.cuda_device_context().stream(),
X->data<T>(),
ins_num,
x_fea_dim,
rank_offset->data<int>(),
rank_offset_dims[0],
rank_offset_dims[1],
param->data<T>(),
para_row,
para_col,
param_help_data,
ins_num * block_matrix_row,
para_col,
max_rank);
CBLAS_TRANSPOSE transA = CblasNoTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
T alpha = 1;
T beta = 0;
int64_t strideA = block_matrix_row;
int64_t strideB = block_matrix_row * para_col;
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
blas.BatchedGEMM(transA,
transB,
1,
para_col,
block_matrix_row,
alpha,
input_help_data,
param_help_data,
beta,
out_data,
ins_num,
strideA,
strideB);
}
};
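// Illustrative note (not part of the original file):
// The BatchedGEMM above multiplies, for every instance i, a 1 x K row of
// expanded features (K = max_rank * x_fea_dim) with that instance's own
// K x para_col block of expanded parameters, giving Out of shape
// [ins_num, para_col]. A plain host reference of the same contraction
// (RankAttentionReference is a hypothetical helper, not Paddle API):
inline void RankAttentionReference(const float* input_help,
                                   const float* param_help,
                                   float* out,
                                   int ins_num,
                                   int k_dim,
                                   int para_col) {
  for (int i = 0; i < ins_num; ++i) {
    for (int c = 0; c < para_col; ++c) {
      float acc = 0.f;
      for (int k = 0; k < k_dim; ++k) {
        acc += input_help[i * k_dim + k] *
               param_help[(i * k_dim + k) * para_col + c];
      }
      out[i * para_col + c] = acc;
    }
  }
}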
template <typename DeviceContext, typename T>
class RankAttentionGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<Tensor>("X"); // not use data
auto *rank_offset = ctx.Input<Tensor>("RankOffset"); // not use data
auto *param = ctx.Input<Tensor>("RankParam"); // not use data
auto *input_help = ctx.Input<Tensor>("InputHelp");
auto *ins_rank = ctx.Input<Tensor>("InsRank");
auto *dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *drank_para = ctx.Output<Tensor>(framework::GradVarName("RankParam"));
// get dim
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
auto max_rank = (rank_offset_dims[1] - 1) / 2;
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
auto &place =
*ctx.template device_context<phi::GPUContext>().eigen_device();
int max_ins = ::max(ins_num, max_size);
// initialize out grad
drank_para->mutable_data<T>(ctx.GetPlace());
auto drank_para_eigen = framework::EigenVector<T>::Flatten(*drank_para);
drank_para_eigen.device(place) =
drank_para_eigen.constant(static_cast<T>(0));
// copy data
Tensor param_grad;
param_grad = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_grad.mutable_data<T>(ctx.GetPlace());
// initialize
auto param_grad_eigen = framework::EigenVector<T>::Flatten(param_grad);
param_grad_eigen.device(place) =
param_grad_eigen.constant(static_cast<T>(0));
// get data ptr
const T *input_help_data = input_help->data<T>();
const T *ins_rank_data = ins_rank->data<T>();
T *param_grad_data = param_grad.data<T>();
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
T alpha = 1;
T beta = 0;
// get param_grad
CBLAS_TRANSPOSE transA = CblasTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
int64_t strideA = block_matrix_row;
int64_t strideB = para_col;
blas.BatchedGEMM(transA,
transB,
block_matrix_row,
para_col,
1,
alpha,
input_help_data,
dout->data<T>(),
beta,
param_grad_data,
ins_num,
strideA,
strideB);
// merge param_grad to get drank_para
merge_rank_attention_param_grad(ctx.cuda_device_context().stream(),
param_grad_data,
ins_num * block_matrix_row,
para_col,
drank_para->data<T>(),
para_row,
para_col,
ins_rank_data,
ins_num,
max_rank,
x_fea_dim);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using GPUCtx = phi::GPUContext;
REGISTER_OP_CUDA_KERNEL(rank_attention,
ops::RankAttentionCUDAKernel<GPUCtx, float>,
ops::RankAttentionCUDAKernel<GPUCtx, double>);
REGISTER_OP_CUDA_KERNEL(rank_attention_grad,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, float>,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, double>);
| 48460b143f8aca0761a26de6e4c638235ff07f4d.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/rank_attention.cu.h"
#include "paddle/fluid/operators/rank_attention_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using framework::Tensor;
template <typename DeviceContext, typename T>
class RankAttentionCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<Tensor>("X");
auto *rank_offset = ctx.Input<Tensor>("RankOffset");
auto *param = ctx.Input<Tensor>("RankParam");
auto *input_help = ctx.Output<Tensor>("InputHelp");
auto *ins_rank = ctx.Output<Tensor>("InsRank");
int max_rank = ctx.Attr<int>("MaxRank");
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *Out = ctx.Output<Tensor>("Out");
// check dims
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
PADDLE_ENFORCE_EQ(
rank_offset_dims[0],
ins_num,
platform::errors::InvalidArgument("Input(RankOffset) has wrong rows."));
PADDLE_ENFORCE_EQ((rank_offset_dims[1] - 1) / 2,
max_rank,
platform::errors::InvalidArgument(
"Input(RankOffset) has wrong columns."));
PADDLE_ENFORCE_EQ(
max_rank * max_rank * x_fea_dim,
para_row,
platform::errors::InvalidArgument("Input(RankParam) has wrong rows."));
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
int max_ins = std::max(ins_num, max_size);
Tensor param_help;
param_help = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_help.mutable_data<T>(ctx.GetPlace());
input_help->Resize({max_ins, block_matrix_row});
ins_rank->Resize({max_ins, 1});
input_help->mutable_data<T>(ctx.GetPlace());
ins_rank->mutable_data<T>(ctx.GetPlace());
Out->mutable_data<T>(ctx.GetPlace());
// initialize
auto param_help_eigen = framework::EigenVector<T>::Flatten(param_help);
auto input_help_eigen = framework::EigenVector<T>::Flatten(*input_help);
auto ins_rank_eigen = framework::EigenVector<T>::Flatten(*ins_rank);
auto out_eigen = framework::EigenVector<T>::Flatten(*Out);
auto &place =
*ctx.template device_context<phi::GPUContext>().eigen_device();
param_help_eigen.device(place) =
param_help_eigen.constant(static_cast<T>(0));
input_help_eigen.device(place) =
input_help_eigen.constant(static_cast<T>(0));
ins_rank_eigen.device(place) = ins_rank_eigen.constant(static_cast<T>(-1));
out_eigen.device(place) = out_eigen.constant(static_cast<T>(0));
// get data ptr
T *input_help_data = input_help->data<T>();
T *param_help_data = param_help.data<T>();
T *ins_rank_data = ins_rank->data<T>();
T *out_data = Out->data<T>();
expand_rank_attention_input(ctx.cuda_device_context().stream(),
X->data<T>(),
ins_num,
x_fea_dim,
input_help_data,
ins_num,
block_matrix_row,
rank_offset->data<int>(),
rank_offset_dims[0],
rank_offset_dims[1],
ins_rank_data,
max_rank);
expand_rank_attention_param(ctx.cuda_device_context().stream(),
X->data<T>(),
ins_num,
x_fea_dim,
rank_offset->data<int>(),
rank_offset_dims[0],
rank_offset_dims[1],
param->data<T>(),
para_row,
para_col,
param_help_data,
ins_num * block_matrix_row,
para_col,
max_rank);
CBLAS_TRANSPOSE transA = CblasNoTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
T alpha = 1;
T beta = 0;
int64_t strideA = block_matrix_row;
int64_t strideB = block_matrix_row * para_col;
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
blas.BatchedGEMM(transA,
transB,
1,
para_col,
block_matrix_row,
alpha,
input_help_data,
param_help_data,
beta,
out_data,
ins_num,
strideA,
strideB);
}
};
template <typename DeviceContext, typename T>
class RankAttentionGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto *X = ctx.Input<Tensor>("X"); // not use data
auto *rank_offset = ctx.Input<Tensor>("RankOffset"); // not use data
auto *param = ctx.Input<Tensor>("RankParam"); // not use data
auto *input_help = ctx.Input<Tensor>("InputHelp");
auto *ins_rank = ctx.Input<Tensor>("InsRank");
auto *dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
int64_t max_size = ctx.Attr<int>("MaxSize");
auto *drank_para = ctx.Output<Tensor>(framework::GradVarName("RankParam"));
// get dim
auto x_dims = X->dims();
auto ins_num = x_dims[0];
auto x_fea_dim = x_dims[1];
auto para_dims = param->dims();
auto para_row = para_dims[0];
auto para_col = para_dims[1];
auto rank_offset_dims = rank_offset->dims();
auto max_rank = (rank_offset_dims[1] - 1) / 2;
int block_matrix_row = max_rank * x_fea_dim;
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
auto &place =
*ctx.template device_context<phi::GPUContext>().eigen_device();
int max_ins = std::max(ins_num, max_size);
// initialize out grad
drank_para->mutable_data<T>(ctx.GetPlace());
auto drank_para_eigen = framework::EigenVector<T>::Flatten(*drank_para);
drank_para_eigen.device(place) =
drank_para_eigen.constant(static_cast<T>(0));
// copy data
Tensor param_grad;
param_grad = ctx.AllocateTmpTensor<T, DeviceContext>(
{max_ins * block_matrix_row, para_col}, dev_ctx);
param_grad.mutable_data<T>(ctx.GetPlace());
// initialize
auto param_grad_eigen = framework::EigenVector<T>::Flatten(param_grad);
param_grad_eigen.device(place) =
param_grad_eigen.constant(static_cast<T>(0));
// get data ptr
const T *input_help_data = input_help->data<T>();
const T *ins_rank_data = ins_rank->data<T>();
T *param_grad_data = param_grad.data<T>();
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
T alpha = 1;
T beta = 0;
// get param_grad
CBLAS_TRANSPOSE transA = CblasTrans;
CBLAS_TRANSPOSE transB = CblasNoTrans;
int64_t strideA = block_matrix_row;
int64_t strideB = para_col;
blas.BatchedGEMM(transA,
transB,
block_matrix_row,
para_col,
1,
alpha,
input_help_data,
dout->data<T>(),
beta,
param_grad_data,
ins_num,
strideA,
strideB);
// merge param_grad to get drank_para
merge_rank_attention_param_grad(ctx.cuda_device_context().stream(),
param_grad_data,
ins_num * block_matrix_row,
para_col,
drank_para->data<T>(),
para_row,
para_col,
ins_rank_data,
ins_num,
max_rank,
x_fea_dim);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using GPUCtx = phi::GPUContext;
REGISTER_OP_CUDA_KERNEL(rank_attention,
ops::RankAttentionCUDAKernel<GPUCtx, float>,
ops::RankAttentionCUDAKernel<GPUCtx, double>);
REGISTER_OP_CUDA_KERNEL(rank_attention_grad,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, float>,
ops::RankAttentionGradOpCUDAKernel<GPUCtx, double>);
|
29eac2d21a4633447835079e1f526d3d46ee1269.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// TODO: add copyright
/*
Compute unnormalized attractive force for barnes-hut approximation of t-SNE.
Attractive force is given by pij*qij.
*/
#include "kernels/attr_forces.h"
__global__
void ComputePijxQijKernel(
float * __restrict__ attr_forces,
const float * __restrict__ pij,
const float * __restrict__ points,
const int * __restrict__ coo_indices,
const int num_points,
const int num_nonzero)
{
register int TID, i, j;
register float ix, iy, jx, jy, dx, dy, pijqij;
TID = threadIdx.x + blockIdx.x * blockDim.x;
if (TID >= num_nonzero) return;
// Seems like the coo indices store the endpoints of each edge corresponding to a P
// in an adjacent fashion so we have something like: [1|2;2|3;i|j; ...]
i = coo_indices[2*TID];
j = coo_indices[2*TID+1];
ix = points[i]; iy = points[num_points + i]; // gets the x and y coordinates of point i
jx = points[j]; jy = points[num_points + j]; // gets the x and y coordinates of point j
dx = ix - jx;
dy = iy - jy;
pijqij = pij[TID] / (1 + dx*dx + dy*dy); // computes the pq mult by acknowledging that q=v/Z
atomicAdd(attr_forces + i, pijqij * dx); // Adds x component of the point
atomicAdd(attr_forces + num_points + i, pijqij * dy); // Adds y component of the point
}
void tsnecuda::ComputeAttractiveForces(
tsnecuda::GpuOptions &gpu_opt,
hipsparseHandle_t &handle,
hipsparseMatDescr_t &descr,
thrust::device_vector<float> &attr_forces,
thrust::device_vector<float> &sparse_pij,
thrust::device_vector<int> &pij_row_ptr,
thrust::device_vector<int> &pij_col_ind,
thrust::device_vector<int> &coo_indices,
thrust::device_vector<float> &points,
thrust::device_vector<float> &ones,
const int num_points,
const int num_nonzero)
{
// Computes pij*qij for each i,j
// TODO: this is bad style
const int BLOCKSIZE = 1024;
const int NBLOCKS = iDivUp(num_nonzero, BLOCKSIZE);
hipLaunchKernelGGL(( ComputePijxQijKernel), dim3(NBLOCKS), dim3(BLOCKSIZE), 0, 0,
thrust::raw_pointer_cast(attr_forces.data()),
thrust::raw_pointer_cast(sparse_pij.data()),
thrust::raw_pointer_cast(points.data()),
thrust::raw_pointer_cast(coo_indices.data()),
num_points,
num_nonzero);
GpuErrorCheck(hipDeviceSynchronize());
}
| 29eac2d21a4633447835079e1f526d3d46ee1269.cu | // TODO: add copyright
/*
Compute unnormalized attractive force for barnes-hut approximation of t-SNE.
Attractive force is given by pij*qij.
*/
#include "kernels/attr_forces.h"
__global__
void ComputePijxQijKernel(
float * __restrict__ attr_forces,
const float * __restrict__ pij,
const float * __restrict__ points,
const int * __restrict__ coo_indices,
const int num_points,
const int num_nonzero)
{
register int TID, i, j;
register float ix, iy, jx, jy, dx, dy, pijqij;
TID = threadIdx.x + blockIdx.x * blockDim.x;
if (TID >= num_nonzero) return;
// Seems like the coo indices store the endpoints of each edge corresponding to a P
// in an adjacent fashion so we have something like: [1|2;2|3;i|j; ...]
i = coo_indices[2*TID];
j = coo_indices[2*TID+1];
ix = points[i]; iy = points[num_points + i]; // gets the x and y coordinates of point i
jx = points[j]; jy = points[num_points + j]; // gets the x and y coordinates of point j
dx = ix - jx;
dy = iy - jy;
pijqij = pij[TID] / (1 + dx*dx + dy*dy); // computes the pq mult by acknowledging that q=v/Z
atomicAdd(attr_forces + i, pijqij * dx); // Adds x component of the point
atomicAdd(attr_forces + num_points + i, pijqij * dy); // Adds y component of the point
}
void tsnecuda::ComputeAttractiveForces(
tsnecuda::GpuOptions &gpu_opt,
cusparseHandle_t &handle,
cusparseMatDescr_t &descr,
thrust::device_vector<float> &attr_forces,
thrust::device_vector<float> &sparse_pij,
thrust::device_vector<int> &pij_row_ptr,
thrust::device_vector<int> &pij_col_ind,
thrust::device_vector<int> &coo_indices,
thrust::device_vector<float> &points,
thrust::device_vector<float> &ones,
const int num_points,
const int num_nonzero)
{
// Computes pij*qij for each i,j
// TODO: this is bad style
const int BLOCKSIZE = 1024;
const int NBLOCKS = iDivUp(num_nonzero, BLOCKSIZE);
ComputePijxQijKernel<<<NBLOCKS, BLOCKSIZE>>>(
thrust::raw_pointer_cast(attr_forces.data()),
thrust::raw_pointer_cast(sparse_pij.data()),
thrust::raw_pointer_cast(points.data()),
thrust::raw_pointer_cast(coo_indices.data()),
num_points,
num_nonzero);
GpuErrorCheck(cudaDeviceSynchronize());
}
|
08dc4c61fd1c33b05a438f48f324dfed91633b7d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_in = NULL;
hipMalloc(&d_in, XSIZE*YSIZE*sizeof(float));
float *d_out = NULL;
hipMalloc(&d_out, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((matrixKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_in, d_out);
hipDeviceSynchronize();
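// warm-up: launch the kernel 10 times before timing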
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((matrixKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_in, d_out);
}
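// timed region: launch the kernel 1000 times and report the elapsed time in microseconds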
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((matrixKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, d_in, d_out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 08dc4c61fd1c33b05a438f48f324dfed91633b7d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_in = NULL;
cudaMalloc(&d_in, XSIZE*YSIZE*sizeof(float));
float *d_out = NULL;
cudaMalloc(&d_out, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixKernel<<<gridBlock,threadBlock>>>(d_in,d_out);
cudaDeviceSynchronize();
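// warm-up: launch the kernel 10 times before timing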
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixKernel<<<gridBlock,threadBlock>>>(d_in,d_out);
}
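// timed region: launch the kernel 1000 times and report the elapsed time in microseconds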
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixKernel<<<gridBlock,threadBlock>>>(d_in,d_out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
CopyConnectionsCoordinatesKernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void CopyConnectionsCoordinatesKernel( int *connectionMatrix, float *pointsCoordinates, float *vertexData, int *connectionCount, int maxCells )
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId < maxCells * maxCells)
{
if(connectionMatrix[threadId] == 1)
{
int from = threadId / maxCells;
int to = threadId % maxCells;
if(to > from)
{
//int vertexDataOffset = maxCells * 3;
int vertexDataOffset = 0;
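// atomically reserve the next output slot; each connection writes two 3D endpoints (6 floats)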
int connIdx = atomicAdd( &connectionCount[0], 1);
vertexData[vertexDataOffset + connIdx * 6] = pointsCoordinates[from * 3];
vertexData[vertexDataOffset + connIdx * 6 + 1] = pointsCoordinates[from * 3 + 1];
vertexData[vertexDataOffset + connIdx * 6 + 2] = pointsCoordinates[from * 3 + 2];
vertexData[vertexDataOffset + connIdx * 6 + 3] = pointsCoordinates[to * 3];
vertexData[vertexDataOffset + connIdx * 6 + 4] = pointsCoordinates[to * 3 + 1];
vertexData[vertexDataOffset + connIdx * 6 + 5] = pointsCoordinates[to * 3 + 2];
}
}
}
} | CopyConnectionsCoordinatesKernel.cu | #include "includes.h"
__global__ void CopyConnectionsCoordinatesKernel( int *connectionMatrix, float *pointsCoordinates, float *vertexData, int *connectionCount, int maxCells )
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId < maxCells * maxCells)
{
if(connectionMatrix[threadId] == 1)
{
int from = threadId / maxCells;
int to = threadId % maxCells;
if(to > from)
{
//int vertexDataOffset = maxCells * 3;
int vertexDataOffset = 0;
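// atomically reserve the next output slot; each connection writes two 3D endpoints (6 floats)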
int connIdx = atomicAdd( &connectionCount[0], 1);
vertexData[vertexDataOffset + connIdx * 6] = pointsCoordinates[from * 3];
vertexData[vertexDataOffset + connIdx * 6 + 1] = pointsCoordinates[from * 3 + 1];
vertexData[vertexDataOffset + connIdx * 6 + 2] = pointsCoordinates[from * 3 + 2];
vertexData[vertexDataOffset + connIdx * 6 + 3] = pointsCoordinates[to * 3];
vertexData[vertexDataOffset + connIdx * 6 + 4] = pointsCoordinates[to * 3 + 1];
vertexData[vertexDataOffset + connIdx * 6 + 5] = pointsCoordinates[to * 3 + 2];
}
}
}
} |
1bb734d3e183ae0bd7e1e857253e315b219f4a81.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
const int MAX_THREAD_NUMBER = 1000000;
extern "C" __device__ long long counterArray[MAX_THREAD_NUMBER];
long long dynamicKernelIndex = 0;
void bambooLogKernelBegin(int staticKernelIndex) {
}
void bambooLogKernelEnd(int staticKernelIndex) {
#ifdef KERNELTRACE
hipDeviceSynchronize();
#endif
long long resultArray[MAX_THREAD_NUMBER] = {0};
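// copy the per-thread instruction counters back from the device-side counterArray symbol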
hipMemcpyFromSymbol(&resultArray, counterArray, MAX_THREAD_NUMBER * sizeof(long long), 0, hipMemcpyDeviceToHost);
for(long long i=0; i<MAX_THREAD_NUMBER; i++){
if(resultArray[i] != 0){
//printf(" -- index %lld -- counter %lld --\n", i, resultArray[i]);
FILE *profileFile = fopen("bamboo.profile.txt", "a");
fprintf(profileFile, " -- threadIndex %lld -- instCount %lld -- dynamicKernelIndex %lld -- staticKernelIndex %d -- \n", i, resultArray[i], dynamicKernelIndex, staticKernelIndex);
fclose(profileFile);
}
}
memset(resultArray, 0, sizeof(resultArray));
hipMemcpyToSymbol(counterArray, &resultArray, MAX_THREAD_NUMBER * sizeof(long long), 0, hipMemcpyHostToDevice);
dynamicKernelIndex++;
}
| 1bb734d3e183ae0bd7e1e857253e315b219f4a81.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
const int MAX_THREAD_NUMBER = 1000000;
extern "C" __device__ long long counterArray[MAX_THREAD_NUMBER];
long long dynamicKernelIndex = 0;
void bambooLogKernelBegin(int staticKernelIndex) {
}
void bambooLogKernelEnd(int staticKernelIndex) {
#ifdef KERNELTRACE
cudaDeviceSynchronize();
#endif
long long resultArray[MAX_THREAD_NUMBER] = {0};
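// copy the per-thread instruction counters back from the device-side counterArray symbol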
cudaMemcpyFromSymbol(&resultArray, counterArray, MAX_THREAD_NUMBER * sizeof(long long), 0, cudaMemcpyDeviceToHost);
for(long long i=0; i<MAX_THREAD_NUMBER; i++){
if(resultArray[i] != 0){
//printf(" -- index %lld -- counter %lld --\n", i, resultArray[i]);
FILE *profileFile = fopen("bamboo.profile.txt", "a");
fprintf(profileFile, " -- threadIndex %lld -- instCount %lld -- dynamicKernelIndex %lld -- staticKernelIndex %d -- \n", i, resultArray[i], dynamicKernelIndex, staticKernelIndex);
fclose(profileFile);
}
}
memset(resultArray, 0, sizeof(resultArray));
cudaMemcpyToSymbol(counterArray, &resultArray, MAX_THREAD_NUMBER * sizeof(long long), 0, cudaMemcpyHostToDevice);
dynamicKernelIndex++;
}
|
b43b2063186f5ca2b591a58e6b51aa944bd853f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "cutil_math.h" // required for float3 vector math
__global__ void ball_intersect_point_kernel(
int b, int n, int m, float radius,
int n_max,
const float *__restrict__ ray_start,
const float *__restrict__ ray_dir,
const float *__restrict__ points,
int *__restrict__ idx,
float *__restrict__ min_depth,
float *__restrict__ max_depth) {
int batch_index = blockIdx.x;
points += batch_index * n * 3;
ray_start += batch_index * m * 3;
ray_dir += batch_index * m * 3;
idx += batch_index * m * n_max;
min_depth += batch_index * m * n_max;
max_depth += batch_index * m * n_max;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float x0 = ray_start[j * 3 + 0];
float y0 = ray_start[j * 3 + 1];
float z0 = ray_start[j * 3 + 2];
float xw = ray_dir[j * 3 + 0];
float yw = ray_dir[j * 3 + 1];
float zw = ray_dir[j * 3 + 2];
for (int l = 0; l < n_max; ++l) {
idx[j * n_max + l] = -1;
}
for (int k = 0, cnt = 0; k < n && cnt < n_max; ++k) {
float x = points[k * 3 + 0] - x0;
float y = points[k * 3 + 1] - y0;
float z = points[k * 3 + 2] - z0;
float d2 = x * x + y * y + z * z;
float d2_proj = pow(x * xw + y * yw + z * zw, 2);
float r2 = d2 - d2_proj;
if (r2 < radius2) {
idx[j * n_max + cnt] = k;
float depth = sqrt(d2_proj);
float depth_blur = sqrt(radius2 - r2);
min_depth[j * n_max + cnt] = depth - depth_blur;
max_depth[j * n_max + cnt] = depth + depth_blur;
++cnt;
}
}
}
}
__device__ float2 RayAABBIntersection(
const float3 &ori,
const float3 &dir,
const float3 ¢er,
float half_voxel) {
float f_low = 0;
float f_high = 100000.;
float f_dim_low, f_dim_high, temp, inv_ray_dir, start, aabb;
for (int d = 0; d < 3; ++d) {
switch (d) {
case 0:
inv_ray_dir = __fdividef(1.0f, dir.x); start = ori.x; aabb = center.x; break;
case 1:
inv_ray_dir = __fdividef(1.0f, dir.y); start = ori.y; aabb = center.y; break;
case 2:
inv_ray_dir = __fdividef(1.0f, dir.z); start = ori.z; aabb = center.z; break;
}
f_dim_low = (aabb - half_voxel - start) * inv_ray_dir;
f_dim_high = (aabb + half_voxel - start) * inv_ray_dir;
// Make sure low is less than high
if (f_dim_high < f_dim_low) {
temp = f_dim_low;
f_dim_low = f_dim_high;
f_dim_high = temp;
}
// If this dimension's high is less than the low we got then we definitely missed.
if (f_dim_high < f_low) {
return make_float2(-1.0f, -1.0f);
}
// Likewise if the low is less than the high.
if (f_dim_low > f_high) {
return make_float2(-1.0f, -1.0f);
}
// Add the clip from this dimension to the previous results
f_low = (f_dim_low > f_low) ? f_dim_low : f_low;
f_high = (f_dim_high < f_high) ? f_dim_high : f_high;
if (f_low > f_high) {
return make_float2(-1.0f, -1.0f);
}
}
return make_float2(f_low, f_high);
}
__global__ void aabb_intersect_point_kernel(
int b, int n, int m, float voxelsize,
int n_max,
const float *__restrict__ ray_start,
const float *__restrict__ ray_dir,
const float *__restrict__ points,
int *__restrict__ idx,
float *__restrict__ min_depth,
float *__restrict__ max_depth) {
int batch_index = blockIdx.x;
points += batch_index * n * 3;
ray_start += batch_index * m * 3;
ray_dir += batch_index * m * 3;
idx += batch_index * m * n_max;
min_depth += batch_index * m * n_max;
max_depth += batch_index * m * n_max;
int index = threadIdx.x;
int stride = blockDim.x;
float half_voxel = voxelsize * 0.5;
for (int j = index; j < m; j += stride) {
for (int l = 0; l < n_max; ++l) {
idx[j * n_max + l] = -1;
}
for (int k = 0, cnt = 0; k < n && cnt < n_max; ++k) {
float2 depths = RayAABBIntersection(
make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
make_float3(points[k * 3 + 0], points[k * 3 + 1], points[k * 3 + 2]),
half_voxel);
if (depths.x > -1.0f){
idx[j * n_max + cnt] = k;
min_depth[j * n_max + cnt] = depths.x;
max_depth[j * n_max + cnt] = depths.y;
++cnt;
}
}
}
}
__global__ void svo_intersect_point_kernel(
int b, int n, int m, float voxelsize,
int n_max,
const float *__restrict__ ray_start,
const float *__restrict__ ray_dir,
const float *__restrict__ points,
const int *__restrict__ children,
int *__restrict__ idx,
float *__restrict__ min_depth,
float *__restrict__ max_depth) {
/*
TODO: this is an inefficient implementation of the
naive Ray -- Sparse Voxel Octree Intersection.
It can be further improved using:
Revelles, Jorge, Carlos Urena, and Miguel Lastra.
"An efficient parametric algorithm for octree traversal." (2000).
*/
int batch_index = blockIdx.x;
points += batch_index * n * 3;
children += batch_index * n * 9;
ray_start += batch_index * m * 3;
ray_dir += batch_index * m * 3;
idx += batch_index * m * n_max;
min_depth += batch_index * m * n_max;
max_depth += batch_index * m * n_max;
int index = threadIdx.x;
int stride = blockDim.x;
float half_voxel = voxelsize * 0.5;
for (int j = index; j < m; j += stride) {
for (int l = 0; l < n_max; ++l) {
idx[j * n_max + l] = -1;
}
int stack[256] = {-1}; // DFS, initialize the stack
int ptr = 0, cnt = 0, k = -1;
stack[ptr] = n - 1; // ROOT node is always the last
while (ptr > -1 && cnt < n_max) {
assert((ptr < 256));
// evaluate the current node
k = stack[ptr];
float2 depths = RayAABBIntersection(
make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
make_float3(points[k * 3 + 0], points[k * 3 + 1], points[k * 3 + 2]),
half_voxel * float(children[k * 9 + 8]));
stack[ptr] = -1; ptr--;
if (depths.x > -1.0f) { // ray did not miss the voxel
// TODO: here it should be possible to know which children are hit, to further optimize the code
if (children[k * 9 + 8] == 1) { // this is a terminal node
idx[j * n_max + cnt] = k;
min_depth[j * n_max + cnt] = depths.x;
max_depth[j * n_max + cnt] = depths.y;
++cnt; continue;
}
for (int u = 0; u < 8; u++) {
if (children[k * 9 + u] > -1) {
ptr++; stack[ptr] = children[k * 9 + u]; // push child to the stack
}
}
}
}
}
}
__device__ float3 RayTriangleIntersection(
const float3 &ori,
const float3 &dir,
const float3 &v0,
const float3 &v1,
const float3 &v2,
float blur) {
float3 v0v1 = v1 - v0;
float3 v0v2 = v2 - v0;
float3 v0O = ori - v0;
float3 dir_crs_v0v2 = cross(dir, v0v2);
float det = dot(v0v1, dir_crs_v0v2);
det = __fdividef(1.0f, det); // CUDA intrinsic function
float u = dot(v0O, dir_crs_v0v2) * det;
if (u < 0.0f - blur || u > 1.0f + blur)
return make_float3(-1.0f, 0.0f, 0.0f);
float3 v0O_crs_v0v1 = cross(v0O, v0v1);
float v = dot(dir, v0O_crs_v0v1) * det;
if (v < 0.0f - blur || v > 1.0f + blur)
return make_float3(-1.0f, 0.0f, 0.0f);
if ((u + v) < 0.0f - blur || (u + v) > 1.0f + blur)
return make_float3(-1.0f, 0.0f, 0.0f);
float t = dot(v0v2, v0O_crs_v0v1) * det;
return make_float3(t, u, v);
}
__global__ void triangle_intersect_point_kernel(
int b, int n, int m, float cagesize,
float blur, int n_max,
const float *__restrict__ ray_start,
const float *__restrict__ ray_dir,
const float *__restrict__ face_points,
int *__restrict__ idx,
float *__restrict__ depth,
float *__restrict__ uv) {
int batch_index = blockIdx.x;
face_points += batch_index * n * 9;
ray_start += batch_index * m * 3;
ray_dir += batch_index * m * 3;
idx += batch_index * m * n_max;
depth += batch_index * m * n_max * 3;
uv += batch_index * m * n_max * 2;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j = index; j < m; j += stride) {
// go over rays
for (int l = 0; l < n_max; ++l) {
idx[j * n_max + l] = -1;
}
int cnt = 0;
for (int k = 0; k < n && cnt < n_max; ++k) {
// go over triangles
float3 tuv = RayTriangleIntersection(
make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
make_float3(face_points[k * 9 + 0], face_points[k * 9 + 1], face_points[k * 9 + 2]),
make_float3(face_points[k * 9 + 3], face_points[k * 9 + 4], face_points[k * 9 + 5]),
make_float3(face_points[k * 9 + 6], face_points[k * 9 + 7], face_points[k * 9 + 8]),
blur);
if (tuv.x > 0) {
int ki = k;
float d = tuv.x, u = tuv.y, v = tuv.z;
// sort
for (int l = 0; l < cnt; l++) {
if (d < depth[j * n_max * 3 + l * 3]) {
swap(ki, idx[j * n_max + l]);
swap(d, depth[j * n_max * 3 + l * 3]);
swap(u, uv[j * n_max * 2 + l * 2]);
swap(v, uv[j * n_max * 2 + l * 2 + 1]);
}
}
idx[j * n_max + cnt] = ki;
depth[j * n_max * 3 + cnt * 3] = d;
uv[j * n_max * 2 + cnt * 2] = u;
uv[j * n_max * 2 + cnt * 2 + 1] = v;
cnt++;
}
}
for (int l = 0; l < cnt; l++) {
// compute min_depth
if (l == 0)
depth[j * n_max * 3 + l * 3 + 1] = -cagesize;
else
depth[j * n_max * 3 + l * 3 + 1] = -fminf(cagesize,
.5 * (depth[j * n_max * 3 + l * 3] - depth[j * n_max * 3 + l * 3 - 3]));
// compute max_depth
if (l == cnt - 1)
depth[j * n_max * 3 + l * 3 + 2] = cagesize;
else
depth[j * n_max * 3 + l * 3 + 2] = fminf(cagesize,
.5 * (depth[j * n_max * 3 + l * 3 + 3] - depth[j * n_max * 3 + l * 3]));
}
}
}
void ball_intersect_point_kernel_wrapper(
int b, int n, int m, float radius, int n_max,
const float *ray_start, const float *ray_dir, const float *points,
int *idx, float *min_depth, float *max_depth) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( ball_intersect_point_kernel), dim3(b), dim3(opt_n_threads(m)), 0, stream,
b, n, m, radius, n_max, ray_start, ray_dir, points, idx, min_depth, max_depth);
CUDA_CHECK_ERRORS();
}
void aabb_intersect_point_kernel_wrapper(
int b, int n, int m, float voxelsize, int n_max,
const float *ray_start, const float *ray_dir, const float *points,
int *idx, float *min_depth, float *max_depth) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( aabb_intersect_point_kernel), dim3(b), dim3(opt_n_threads(m)), 0, stream,
b, n, m, voxelsize, n_max, ray_start, ray_dir, points, idx, min_depth, max_depth);
CUDA_CHECK_ERRORS();
}
void svo_intersect_point_kernel_wrapper(
int b, int n, int m, float voxelsize, int n_max,
const float *ray_start, const float *ray_dir, const float *points, const int *children,
int *idx, float *min_depth, float *max_depth) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( svo_intersect_point_kernel), dim3(b), dim3(opt_n_threads(m)), 0, stream,
b, n, m, voxelsize, n_max, ray_start, ray_dir, points, children, idx, min_depth, max_depth);
CUDA_CHECK_ERRORS();
}
void triangle_intersect_point_kernel_wrapper(
int b, int n, int m, float cagesize, float blur, int n_max,
const float *ray_start, const float *ray_dir, const float *face_points,
int *idx, float *depth, float *uv) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( triangle_intersect_point_kernel), dim3(b), dim3(opt_n_threads(m)), 0, stream,
b, n, m, cagesize, blur, n_max, ray_start, ray_dir, face_points, idx, depth, uv);
CUDA_CHECK_ERRORS();
}
__global__ void uniform_ray_sampling_kernel(
int b, int num_rays,
int max_hits,
int max_steps,
float step_size,
const int *__restrict__ pts_idx,
const float *__restrict__ min_depth,
const float *__restrict__ max_depth,
const float *__restrict__ uniform_noise,
int *__restrict__ sampled_idx,
float *__restrict__ sampled_depth,
float *__restrict__ sampled_dists) {
int batch_index = blockIdx.x;
int index = threadIdx.x;
int stride = blockDim.x;
pts_idx += batch_index * num_rays * max_hits;
min_depth += batch_index * num_rays * max_hits;
max_depth += batch_index * num_rays * max_hits;
uniform_noise += batch_index * num_rays * max_steps;
sampled_idx += batch_index * num_rays * max_steps;
sampled_depth += batch_index * num_rays * max_steps;
sampled_dists += batch_index * num_rays * max_steps;
// loop over all rays
for (int j = index; j < num_rays; j += stride) {
int H = j * max_hits, K = j * max_steps;
int s = 0, ucur = 0, umin = 0, umax = 0;
float last_min_depth, last_max_depth, curr_depth;
// sort all depths
while (true) {
if (pts_idx[H + umax] == -1 || umax == max_hits || ucur == max_steps) {
break; // reach the maximum
}
if (umin < max_hits) {
last_min_depth = min_depth[H + umin];
}
if (umax < max_hits) {
last_max_depth = max_depth[H + umax];
}
if (ucur < max_steps) {
curr_depth = min_depth[H] + (float(ucur) + uniform_noise[K + ucur]) * step_size;
}
if (last_max_depth <= curr_depth && last_max_depth <= last_min_depth) {
sampled_depth[K + s] = last_max_depth;
sampled_idx[K + s] = pts_idx[H + umax];
umax++; s++; continue;
}
if (curr_depth <= last_min_depth && curr_depth <= last_max_depth) {
sampled_depth[K + s] = curr_depth;
sampled_idx[K + s] = pts_idx[H + umin - 1];
ucur++; s++; continue;
}
if (last_min_depth <= curr_depth && last_min_depth <= last_max_depth) {
sampled_depth[K + s] = last_min_depth;
sampled_idx[K + s] = pts_idx[H + umin];
umin++; s++; continue;
}
}
float l_depth, r_depth;
int step = 0;
for (ucur = 0, umin = 0, umax = 0; ucur < max_steps - 1; ucur++) {
l_depth = sampled_depth[K + ucur];
r_depth = sampled_depth[K + ucur + 1];
sampled_depth[K + ucur] = (l_depth + r_depth) * .5;
sampled_dists[K + ucur] = (r_depth - l_depth);
if (sampled_depth[K + ucur] >= min_depth[H + umin] && umin < max_hits) umin++;
if (sampled_depth[K + ucur] >= max_depth[H + umax] && umax < max_hits) umax++;
if (umax == max_hits || pts_idx[H + umax] == -1) break;
if (umin - 1 == umax && sampled_dists[K + ucur] > 0) {
sampled_depth[K + step] = sampled_depth[K + ucur];
sampled_dists[K + step] = sampled_dists[K + ucur];
sampled_idx[K + step] = sampled_idx[K + ucur];
step++;
}
}
for (int s = step; s < max_steps; s++) {
sampled_idx[K + s] = -1;
}
}
}
void uniform_ray_sampling_kernel_wrapper(
int b, int num_rays, int max_hits, int max_steps, float step_size,
const int *pts_idx, const float *min_depth, const float *max_depth, const float *uniform_noise,
int *sampled_idx, float *sampled_depth, float *sampled_dists) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( uniform_ray_sampling_kernel), dim3(b), dim3(opt_n_threads(num_rays)), 0, stream,
b, num_rays, max_hits, max_steps, step_size, pts_idx,
min_depth, max_depth, uniform_noise, sampled_idx, sampled_depth, sampled_dists);
CUDA_CHECK_ERRORS();
}
| b43b2063186f5ca2b591a58e6b51aa944bd853f7.cu | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "cutil_math.h" // required for float3 vector math
__global__ void ball_intersect_point_kernel(
int b, int n, int m, float radius,
int n_max,
const float *__restrict__ ray_start,
const float *__restrict__ ray_dir,
const float *__restrict__ points,
int *__restrict__ idx,
float *__restrict__ min_depth,
float *__restrict__ max_depth) {
int batch_index = blockIdx.x;
points += batch_index * n * 3;
ray_start += batch_index * m * 3;
ray_dir += batch_index * m * 3;
idx += batch_index * m * n_max;
min_depth += batch_index * m * n_max;
max_depth += batch_index * m * n_max;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float x0 = ray_start[j * 3 + 0];
float y0 = ray_start[j * 3 + 1];
float z0 = ray_start[j * 3 + 2];
float xw = ray_dir[j * 3 + 0];
float yw = ray_dir[j * 3 + 1];
float zw = ray_dir[j * 3 + 2];
for (int l = 0; l < n_max; ++l) {
idx[j * n_max + l] = -1;
}
for (int k = 0, cnt = 0; k < n && cnt < n_max; ++k) {
float x = points[k * 3 + 0] - x0;
float y = points[k * 3 + 1] - y0;
float z = points[k * 3 + 2] - z0;
float d2 = x * x + y * y + z * z;
float d2_proj = pow(x * xw + y * yw + z * zw, 2);
float r2 = d2 - d2_proj;
if (r2 < radius2) {
idx[j * n_max + cnt] = k;
float depth = sqrt(d2_proj);
float depth_blur = sqrt(radius2 - r2);
min_depth[j * n_max + cnt] = depth - depth_blur;
max_depth[j * n_max + cnt] = depth + depth_blur;
++cnt;
}
}
}
}
__device__ float2 RayAABBIntersection(
const float3 &ori,
const float3 &dir,
const float3 ¢er,
float half_voxel) {
float f_low = 0;
float f_high = 100000.;
float f_dim_low, f_dim_high, temp, inv_ray_dir, start, aabb;
for (int d = 0; d < 3; ++d) {
switch (d) {
case 0:
inv_ray_dir = __fdividef(1.0f, dir.x); start = ori.x; aabb = center.x; break;
case 1:
inv_ray_dir = __fdividef(1.0f, dir.y); start = ori.y; aabb = center.y; break;
case 2:
inv_ray_dir = __fdividef(1.0f, dir.z); start = ori.z; aabb = center.z; break;
}
f_dim_low = (aabb - half_voxel - start) * inv_ray_dir;
f_dim_high = (aabb + half_voxel - start) * inv_ray_dir;
// Make sure low is less than high
if (f_dim_high < f_dim_low) {
temp = f_dim_low;
f_dim_low = f_dim_high;
f_dim_high = temp;
}
// If this dimension's high is less than the low we got then we definitely missed.
if (f_dim_high < f_low) {
return make_float2(-1.0f, -1.0f);
}
// Likewise if the low is less than the high.
if (f_dim_low > f_high) {
return make_float2(-1.0f, -1.0f);
}
// Add the clip from this dimension to the previous results
f_low = (f_dim_low > f_low) ? f_dim_low : f_low;
f_high = (f_dim_high < f_high) ? f_dim_high : f_high;
if (f_low > f_high) {
return make_float2(-1.0f, -1.0f);
}
}
return make_float2(f_low, f_high);
}
__global__ void aabb_intersect_point_kernel(
int b, int n, int m, float voxelsize,
int n_max,
const float *__restrict__ ray_start,
const float *__restrict__ ray_dir,
const float *__restrict__ points,
int *__restrict__ idx,
float *__restrict__ min_depth,
float *__restrict__ max_depth) {
int batch_index = blockIdx.x;
points += batch_index * n * 3;
ray_start += batch_index * m * 3;
ray_dir += batch_index * m * 3;
idx += batch_index * m * n_max;
min_depth += batch_index * m * n_max;
max_depth += batch_index * m * n_max;
int index = threadIdx.x;
int stride = blockDim.x;
float half_voxel = voxelsize * 0.5;
for (int j = index; j < m; j += stride) {
for (int l = 0; l < n_max; ++l) {
idx[j * n_max + l] = -1;
}
for (int k = 0, cnt = 0; k < n && cnt < n_max; ++k) {
float2 depths = RayAABBIntersection(
make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
make_float3(points[k * 3 + 0], points[k * 3 + 1], points[k * 3 + 2]),
half_voxel);
if (depths.x > -1.0f){
idx[j * n_max + cnt] = k;
min_depth[j * n_max + cnt] = depths.x;
max_depth[j * n_max + cnt] = depths.y;
++cnt;
}
}
}
}
__global__ void svo_intersect_point_kernel(
int b, int n, int m, float voxelsize,
int n_max,
const float *__restrict__ ray_start,
const float *__restrict__ ray_dir,
const float *__restrict__ points,
const int *__restrict__ children,
int *__restrict__ idx,
float *__restrict__ min_depth,
float *__restrict__ max_depth) {
/*
TODO: this is an inefficient implementation of the
naive Ray -- Sparse Voxel Octree Intersection.
It can be further improved using:
Revelles, Jorge, Carlos Urena, and Miguel Lastra.
"An efficient parametric algorithm for octree traversal." (2000).
*/
int batch_index = blockIdx.x;
points += batch_index * n * 3;
children += batch_index * n * 9;
ray_start += batch_index * m * 3;
ray_dir += batch_index * m * 3;
idx += batch_index * m * n_max;
min_depth += batch_index * m * n_max;
max_depth += batch_index * m * n_max;
int index = threadIdx.x;
int stride = blockDim.x;
float half_voxel = voxelsize * 0.5;
for (int j = index; j < m; j += stride) {
for (int l = 0; l < n_max; ++l) {
idx[j * n_max + l] = -1;
}
int stack[256] = {-1}; // DFS, initialize the stack
int ptr = 0, cnt = 0, k = -1;
stack[ptr] = n - 1; // ROOT node is always the last
while (ptr > -1 && cnt < n_max) {
assert((ptr < 256));
// evaluate the current node
k = stack[ptr];
float2 depths = RayAABBIntersection(
make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
make_float3(points[k * 3 + 0], points[k * 3 + 1], points[k * 3 + 2]),
half_voxel * float(children[k * 9 + 8]));
stack[ptr] = -1; ptr--;
if (depths.x > -1.0f) { // ray did not miss the voxel
// TODO: here it should be possible to know which children are hit, to further optimize the code
if (children[k * 9 + 8] == 1) { // this is a terminal node
idx[j * n_max + cnt] = k;
min_depth[j * n_max + cnt] = depths.x;
max_depth[j * n_max + cnt] = depths.y;
++cnt; continue;
}
for (int u = 0; u < 8; u++) {
if (children[k * 9 + u] > -1) {
ptr++; stack[ptr] = children[k * 9 + u]; // push child to the stack
}
}
}
}
}
}
__device__ float3 RayTriangleIntersection(
const float3 &ori,
const float3 &dir,
const float3 &v0,
const float3 &v1,
const float3 &v2,
float blur) {
float3 v0v1 = v1 - v0;
float3 v0v2 = v2 - v0;
float3 v0O = ori - v0;
float3 dir_crs_v0v2 = cross(dir, v0v2);
float det = dot(v0v1, dir_crs_v0v2);
det = __fdividef(1.0f, det); // CUDA intrinsic function
float u = dot(v0O, dir_crs_v0v2) * det;
if (u < 0.0f - blur || u > 1.0f + blur)
return make_float3(-1.0f, 0.0f, 0.0f);
float3 v0O_crs_v0v1 = cross(v0O, v0v1);
float v = dot(dir, v0O_crs_v0v1) * det;
if (v < 0.0f - blur || v > 1.0f + blur)
return make_float3(-1.0f, 0.0f, 0.0f);
if ((u + v) < 0.0f - blur || (u + v) > 1.0f + blur)
return make_float3(-1.0f, 0.0f, 0.0f);
float t = dot(v0v2, v0O_crs_v0v1) * det;
return make_float3(t, u, v);
}
__global__ void triangle_intersect_point_kernel(
int b, int n, int m, float cagesize,
float blur, int n_max,
const float *__restrict__ ray_start,
const float *__restrict__ ray_dir,
const float *__restrict__ face_points,
int *__restrict__ idx,
float *__restrict__ depth,
float *__restrict__ uv) {
int batch_index = blockIdx.x;
face_points += batch_index * n * 9;
ray_start += batch_index * m * 3;
ray_dir += batch_index * m * 3;
idx += batch_index * m * n_max;
depth += batch_index * m * n_max * 3;
uv += batch_index * m * n_max * 2;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j = index; j < m; j += stride) {
// go over rays
for (int l = 0; l < n_max; ++l) {
idx[j * n_max + l] = -1;
}
int cnt = 0;
for (int k = 0; k < n && cnt < n_max; ++k) {
// go over triangles
float3 tuv = RayTriangleIntersection(
make_float3(ray_start[j * 3 + 0], ray_start[j * 3 + 1], ray_start[j * 3 + 2]),
make_float3(ray_dir[j * 3 + 0], ray_dir[j * 3 + 1], ray_dir[j * 3 + 2]),
make_float3(face_points[k * 9 + 0], face_points[k * 9 + 1], face_points[k * 9 + 2]),
make_float3(face_points[k * 9 + 3], face_points[k * 9 + 4], face_points[k * 9 + 5]),
make_float3(face_points[k * 9 + 6], face_points[k * 9 + 7], face_points[k * 9 + 8]),
blur);
if (tuv.x > 0) {
int ki = k;
float d = tuv.x, u = tuv.y, v = tuv.z;
// sort
for (int l = 0; l < cnt; l++) {
if (d < depth[j * n_max * 3 + l * 3]) {
swap(ki, idx[j * n_max + l]);
swap(d, depth[j * n_max * 3 + l * 3]);
swap(u, uv[j * n_max * 2 + l * 2]);
swap(v, uv[j * n_max * 2 + l * 2 + 1]);
}
}
idx[j * n_max + cnt] = ki;
depth[j * n_max * 3 + cnt * 3] = d;
uv[j * n_max * 2 + cnt * 2] = u;
uv[j * n_max * 2 + cnt * 2 + 1] = v;
cnt++;
}
}
for (int l = 0; l < cnt; l++) {
// compute min_depth
if (l == 0)
depth[j * n_max * 3 + l * 3 + 1] = -cagesize;
else
depth[j * n_max * 3 + l * 3 + 1] = -fminf(cagesize,
.5 * (depth[j * n_max * 3 + l * 3] - depth[j * n_max * 3 + l * 3 - 3]));
// compute max_depth
if (l == cnt - 1)
depth[j * n_max * 3 + l * 3 + 2] = cagesize;
else
depth[j * n_max * 3 + l * 3 + 2] = fminf(cagesize,
.5 * (depth[j * n_max * 3 + l * 3 + 3] - depth[j * n_max * 3 + l * 3]));
}
}
}
void ball_intersect_point_kernel_wrapper(
int b, int n, int m, float radius, int n_max,
const float *ray_start, const float *ray_dir, const float *points,
int *idx, float *min_depth, float *max_depth) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
ball_intersect_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, radius, n_max, ray_start, ray_dir, points, idx, min_depth, max_depth);
CUDA_CHECK_ERRORS();
}
void aabb_intersect_point_kernel_wrapper(
int b, int n, int m, float voxelsize, int n_max,
const float *ray_start, const float *ray_dir, const float *points,
int *idx, float *min_depth, float *max_depth) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
aabb_intersect_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, voxelsize, n_max, ray_start, ray_dir, points, idx, min_depth, max_depth);
CUDA_CHECK_ERRORS();
}
void svo_intersect_point_kernel_wrapper(
int b, int n, int m, float voxelsize, int n_max,
const float *ray_start, const float *ray_dir, const float *points, const int *children,
int *idx, float *min_depth, float *max_depth) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
svo_intersect_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, voxelsize, n_max, ray_start, ray_dir, points, children, idx, min_depth, max_depth);
CUDA_CHECK_ERRORS();
}
void triangle_intersect_point_kernel_wrapper(
int b, int n, int m, float cagesize, float blur, int n_max,
const float *ray_start, const float *ray_dir, const float *face_points,
int *idx, float *depth, float *uv) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
triangle_intersect_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, cagesize, blur, n_max, ray_start, ray_dir, face_points, idx, depth, uv);
CUDA_CHECK_ERRORS();
}
__global__ void uniform_ray_sampling_kernel(
int b, int num_rays,
int max_hits,
int max_steps,
float step_size,
const int *__restrict__ pts_idx,
const float *__restrict__ min_depth,
const float *__restrict__ max_depth,
const float *__restrict__ uniform_noise,
int *__restrict__ sampled_idx,
float *__restrict__ sampled_depth,
float *__restrict__ sampled_dists) {
int batch_index = blockIdx.x;
int index = threadIdx.x;
int stride = blockDim.x;
pts_idx += batch_index * num_rays * max_hits;
min_depth += batch_index * num_rays * max_hits;
max_depth += batch_index * num_rays * max_hits;
uniform_noise += batch_index * num_rays * max_steps;
sampled_idx += batch_index * num_rays * max_steps;
sampled_depth += batch_index * num_rays * max_steps;
sampled_dists += batch_index * num_rays * max_steps;
// loop over all rays
for (int j = index; j < num_rays; j += stride) {
int H = j * max_hits, K = j * max_steps;
int s = 0, ucur = 0, umin = 0, umax = 0;
float last_min_depth, last_max_depth, curr_depth;
// sort all depths
while (true) {
if (pts_idx[H + umax] == -1 || umax == max_hits || ucur == max_steps) {
break; // reach the maximum
}
if (umin < max_hits) {
last_min_depth = min_depth[H + umin];
}
if (umax < max_hits) {
last_max_depth = max_depth[H + umax];
}
if (ucur < max_steps) {
curr_depth = min_depth[H] + (float(ucur) + uniform_noise[K + ucur]) * step_size;
}
if (last_max_depth <= curr_depth && last_max_depth <= last_min_depth) {
sampled_depth[K + s] = last_max_depth;
sampled_idx[K + s] = pts_idx[H + umax];
umax++; s++; continue;
}
if (curr_depth <= last_min_depth && curr_depth <= last_max_depth) {
sampled_depth[K + s] = curr_depth;
sampled_idx[K + s] = pts_idx[H + umin - 1];
ucur++; s++; continue;
}
if (last_min_depth <= curr_depth && last_min_depth <= last_max_depth) {
sampled_depth[K + s] = last_min_depth;
sampled_idx[K + s] = pts_idx[H + umin];
umin++; s++; continue;
}
}
float l_depth, r_depth;
int step = 0;
for (ucur = 0, umin = 0, umax = 0; ucur < max_steps - 1; ucur++) {
l_depth = sampled_depth[K + ucur];
r_depth = sampled_depth[K + ucur + 1];
sampled_depth[K + ucur] = (l_depth + r_depth) * .5;
sampled_dists[K + ucur] = (r_depth - l_depth);
if (sampled_depth[K + ucur] >= min_depth[H + umin] && umin < max_hits) umin++;
if (sampled_depth[K + ucur] >= max_depth[H + umax] && umax < max_hits) umax++;
if (umax == max_hits || pts_idx[H + umax] == -1) break;
if (umin - 1 == umax && sampled_dists[K + ucur] > 0) {
sampled_depth[K + step] = sampled_depth[K + ucur];
sampled_dists[K + step] = sampled_dists[K + ucur];
sampled_idx[K + step] = sampled_idx[K + ucur];
step++;
}
}
for (int s = step; s < max_steps; s++) {
sampled_idx[K + s] = -1;
}
}
}
void uniform_ray_sampling_kernel_wrapper(
int b, int num_rays, int max_hits, int max_steps, float step_size,
const int *pts_idx, const float *min_depth, const float *max_depth, const float *uniform_noise,
int *sampled_idx, float *sampled_depth, float *sampled_dists) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
uniform_ray_sampling_kernel<<<b, opt_n_threads(num_rays), 0, stream>>>(
b, num_rays, max_hits, max_steps, step_size, pts_idx,
min_depth, max_depth, uniform_noise, sampled_idx, sampled_depth, sampled_dists);
CUDA_CHECK_ERRORS();
}
|
375d4bc21eee1022fbe5abca23f0ff59304023d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
namespace pcl
{
namespace device
{
__device__ __forceinline__ float
getMinTime (const float3& volume_max, const float3& origin, const float3& dir)
{
float txmin = ( (dir.x > 0 ? 0.f : volume_max.x) - origin.x) / dir.x;
float tymin = ( (dir.y > 0 ? 0.f : volume_max.y) - origin.y) / dir.y;
float tzmin = ( (dir.z > 0 ? 0.f : volume_max.z) - origin.z) / dir.z;
return fmax ( fmax (txmin, tymin), tzmin);
}
__device__ __forceinline__ float
getMaxTime (const float3& volume_max, const float3& origin, const float3& dir)
{
float txmax = ( (dir.x > 0 ? volume_max.x : 0.f) - origin.x) / dir.x;
float tymax = ( (dir.y > 0 ? volume_max.y : 0.f) - origin.y) / dir.y;
float tzmax = ( (dir.z > 0 ? volume_max.z : 0.f) - origin.z) / dir.z;
return fmin (fmin (txmax, tymax), tzmax);
}
struct RayCaster
{
enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 };
Mat33 Rcurr;
float3 tcurr;
float time_step;
float3 volume_size;
float3 cell_size;
int cols, rows;
PtrStep<volume_elem_type> volume;
Intr intr;
mutable PtrStep<float> nmap;
mutable PtrStep<float> vmap;
mutable PtrStep<float> error;
bool hasImageError;
ClippingPlane clippingPlane;
bool hasClipping;
__device__ __forceinline__ float3
get_ray_next (int x, int y) const
{
float3 ray_next;
ray_next.x = (x - intr.cx) / intr.fx;
ray_next.y = (y - intr.cy) / intr.fy;
ray_next.z = 1;
return ray_next;
}
__device__ __forceinline__ bool
checkInds (const int3& g) const
{
return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < VOLUME_X && g.y < VOLUME_Y && g.z < VOLUME_Z);
}
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ int3
getVoxel (float3 point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& origin, const float3& dir, float time) const
{
return interpolateTrilineary (origin + dir * time);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
if (g.x <= 0 || g.x >= VOLUME_X - 1)
return numeric_limits<float>::quiet_NaN ();
if (g.y <= 0 || g.y >= VOLUME_Y - 1)
return numeric_limits<float>::quiet_NaN ();
if (g.z <= 0 || g.z >= VOLUME_Z - 1)
return numeric_limits<float>::quiet_NaN ();
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
g.x = (point.x < vx) ? (g.x - 1) : g.x;
g.y = (point.y < vy) ? (g.y - 1) : g.y;
g.z = (point.z < vz) ? (g.z - 1) : g.z;
float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c +
readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c +
readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c +
readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c;
return res;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if(hasImageError)
error.ptr(y)[x] = -1;
if(hasClipping)
clippingPlane.clippedRegion[y * cols + x] = 0;
if (x >= cols || y >= rows)
return;
vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
nmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
float3 ray_start = tcurr;
float3 ray_next = Rcurr * get_ray_next (x, y) + tcurr;
float3 ray_dir = normalized (ray_next - ray_start);
//ensure that it isn't a degenerate case
ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x;
ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y;
ray_dir.z = (ray_dir.z == 0.f) ? 1e-15 : ray_dir.z;
// compute the times at which the ray enters and exits the volume
float time_start_volume = getMinTime (volume_size, ray_start, ray_dir);
float time_exit_volume = getMaxTime (volume_size, ray_start, ray_dir);
const float min_dist = 0.f; //in mm
time_start_volume = fmax (time_start_volume, min_dist);
if (time_start_volume >= time_exit_volume)
return;
int time_curr = time_start_volume;
int3 g = getVoxel (ray_start + ray_dir * time_curr);
g.x = max (0, min (g.x, VOLUME_X - 1));
g.y = max (0, min (g.y, VOLUME_Y - 1));
g.z = max (0, min (g.z, VOLUME_Z - 1));
float tsdf = readTsdf (g.x, g.y, g.z);
//infinite loop guard
const float max_time = 3 * (volume_size.x + volume_size.y + volume_size.z);
for (; time_curr < max_time; time_curr += time_step)
{
float tsdf_prev = tsdf;
int3 g = getVoxel ( ray_start + ray_dir * (time_curr + time_step) );
if (!checkInds (g))
break;
tsdf = readTsdf (g.x, g.y, g.z);
if (tsdf_prev < 0.f && tsdf > 0.f)
break;
if (tsdf_prev > 0.f && tsdf < 0.f) //zero crossing
{
if(hasImageError)
error.ptr(y)[x] = tsdf_prev - tsdf;
if(hasClipping)
if(!(g.x >= clippingPlane.leftX && g.x <= clippingPlane.rightX && g.y >= clippingPlane.upY && g.y <= clippingPlane.downY
&& g.z >= clippingPlane.frontZ && g.z <= clippingPlane.backZ))
clippingPlane.clippedRegion[y * cols + x] = 255;
float Ftdt = interpolateTrilineary (ray_start, ray_dir, time_curr + time_step);
if (isnan (Ftdt))
break;
float Ft = interpolateTrilineary (ray_start, ray_dir, time_curr);
if (isnan (Ft))
break;
//float Ts = time_curr - time_step * Ft/(Ftdt - Ft);
float Ts = time_curr - time_step * Ft / (Ftdt - Ft);
float3 vetex_found = ray_start + ray_dir * Ts;
vmap.ptr (y )[x] = vetex_found.x;
vmap.ptr (y + rows)[x] = vetex_found.y;
vmap.ptr (y + 2 * rows)[x] = vetex_found.z;
int3 g = getVoxel ( ray_start + ray_dir * time_curr );
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
float3 t;
float3 n;
t = vetex_found;
t.x += cell_size.x / 4;
float Fx1 = interpolateTrilineary (t);
t = vetex_found;
t.x -= cell_size.x / 4;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = vetex_found;
t.y += cell_size.y / 4;
float Fy1 = interpolateTrilineary (t);
t = vetex_found;
t.y -= cell_size.y / 4;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = vetex_found;
t.z += cell_size.z / 4;
float Fz1 = interpolateTrilineary (t);
t = vetex_found;
t.z -= cell_size.z / 4;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
nmap.ptr (y )[x] = n.x;
nmap.ptr (y + rows)[x] = n.y;
nmap.ptr (y + 2 * rows)[x] = n.z;
}
break;
}
} /* for(;;) */
}
};
__global__ void
rayCastKernel (const RayCaster rc) {
rc ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
float tranc_dist, const float3& volume_size,
const PtrStep<volume_elem_type>& volume, MapArr& vmap, MapArr& nmap)
{
RayCaster rc;
rc.Rcurr = Rcurr;
rc.tcurr = tcurr;
rc.time_step = tranc_dist * 0.8f;
rc.volume_size = volume_size;
rc.cell_size.x = volume_size.x / VOLUME_X;
rc.cell_size.y = volume_size.y / VOLUME_Y;
rc.cell_size.z = volume_size.z / VOLUME_Z;
rc.cols = vmap.cols ();
rc.rows = vmap.rows () / 3;
rc.intr = intr;
rc.volume = volume;
rc.vmap = vmap;
rc.nmap = nmap;
rc.hasImageError = false;
rc.hasClipping = false;
dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y);
dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y));
rayCastKernel << < grid, block >> > (rc);
cudaSafeCall (hipGetLastError ());
//cudaSafeCall(hipDeviceSynchronize());
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
float tranc_dist, const float3& volume_size,
const PtrStep<volume_elem_type>& volume, MapArr& vmap, MapArr& nmap, DeviceArray2D<float>& error)
{
RayCaster rc;
rc.Rcurr = Rcurr;
rc.tcurr = tcurr;
rc.time_step = tranc_dist * 0.8f;
rc.volume_size = volume_size;
rc.cell_size.x = volume_size.x / VOLUME_X;
rc.cell_size.y = volume_size.y / VOLUME_Y;
rc.cell_size.z = volume_size.z / VOLUME_Z;
rc.cols = vmap.cols ();
rc.rows = vmap.rows () / 3;
rc.intr = intr;
rc.volume = volume;
rc.vmap = vmap;
rc.nmap = nmap;
rc.error = error;
rc.hasImageError = true;
rc.hasClipping = false;
dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y);
dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y));
rayCastKernel << < grid, block >> > (rc);
cudaSafeCall (hipGetLastError ());
//cudaSafeCall(hipDeviceSynchronize());
}
void
pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
float tranc_dist, const float3& volume_size,
const PtrStep<volume_elem_type>& volume, MapArr& vmap, MapArr& nmap,
ClippingPlane& clipPlane)
{
RayCaster rc;
rc.Rcurr = Rcurr;
rc.tcurr = tcurr;
rc.time_step = tranc_dist * 0.6f;
rc.volume_size = volume_size;
rc.cell_size.x = volume_size.x / VOLUME_X;
rc.cell_size.y = volume_size.y / VOLUME_Y;
rc.cell_size.z = volume_size.z / VOLUME_Z;
rc.cols = vmap.cols ();
rc.rows = vmap.rows () / 3;
rc.intr = intr;
rc.volume = volume;
rc.vmap = vmap;
rc.nmap = nmap;
rc.hasImageError = false;
rc.hasClipping = true;
rc.clippingPlane = clipPlane;
dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y);
dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y));
rayCastKernel << < grid, block >> > (rc);
cudaSafeCall (hipGetLastError ());
} | 375d4bc21eee1022fbe5abca23f0ff59304023d4.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
namespace pcl
{
namespace device
{
__device__ __forceinline__ float
getMinTime (const float3& volume_max, const float3& origin, const float3& dir)
{
float txmin = ( (dir.x > 0 ? 0.f : volume_max.x) - origin.x) / dir.x;
float tymin = ( (dir.y > 0 ? 0.f : volume_max.y) - origin.y) / dir.y;
float tzmin = ( (dir.z > 0 ? 0.f : volume_max.z) - origin.z) / dir.z;
return fmax ( fmax (txmin, tymin), tzmin);
}
__device__ __forceinline__ float
getMaxTime (const float3& volume_max, const float3& origin, const float3& dir)
{
float txmax = ( (dir.x > 0 ? volume_max.x : 0.f) - origin.x) / dir.x;
float tymax = ( (dir.y > 0 ? volume_max.y : 0.f) - origin.y) / dir.y;
float tzmax = ( (dir.z > 0 ? volume_max.z : 0.f) - origin.z) / dir.z;
return fmin (fmin (txmax, tymax), tzmax);
}
struct RayCaster
{
enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 };
Mat33 Rcurr;
float3 tcurr;
float time_step;
float3 volume_size;
float3 cell_size;
int cols, rows;
PtrStep<volume_elem_type> volume;
Intr intr;
mutable PtrStep<float> nmap;
mutable PtrStep<float> vmap;
mutable PtrStep<float> error;
bool hasImageError;
ClippingPlane clippingPlane;
bool hasClipping;
__device__ __forceinline__ float3
get_ray_next (int x, int y) const
{
float3 ray_next;
ray_next.x = (x - intr.cx) / intr.fx;
ray_next.y = (y - intr.cy) / intr.fy;
ray_next.z = 1;
return ray_next;
}
__device__ __forceinline__ bool
checkInds (const int3& g) const
{
        return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < VOLUME_X && g.y < VOLUME_Y && g.z < VOLUME_Z);
}
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ int3
getVoxel (float3 point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& origin, const float3& dir, float time) const
{
return interpolateTrilineary (origin + dir * time);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
if (g.x <= 0 || g.x >= VOLUME_X - 1)
return numeric_limits<float>::quiet_NaN ();
if (g.y <= 0 || g.y >= VOLUME_Y - 1)
return numeric_limits<float>::quiet_NaN ();
if (g.z <= 0 || g.z >= VOLUME_Z - 1)
return numeric_limits<float>::quiet_NaN ();
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
g.x = (point.x < vx) ? (g.x - 1) : g.x;
g.y = (point.y < vy) ? (g.y - 1) : g.y;
g.z = (point.z < vz) ? (g.z - 1) : g.z;
float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c +
readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c +
readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c +
readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c;
return res;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if(hasImageError)
error.ptr(y)[x] = -1;
if(hasClipping)
clippingPlane.clippedRegion[y * cols + x] = 0;
if (x >= cols || y >= rows)
return;
vmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
nmap.ptr (y)[x] = numeric_limits<float>::quiet_NaN ();
float3 ray_start = tcurr;
float3 ray_next = Rcurr * get_ray_next (x, y) + tcurr;
float3 ray_dir = normalized (ray_next - ray_start);
//ensure that it isn't a degenerate case
ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x;
ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y;
ray_dir.z = (ray_dir.z == 0.f) ? 1e-15 : ray_dir.z;
        // compute the times at which the ray enters and exits the volume
float time_start_volume = getMinTime (volume_size, ray_start, ray_dir);
float time_exit_volume = getMaxTime (volume_size, ray_start, ray_dir);
const float min_dist = 0.f; //in mm
time_start_volume = fmax (time_start_volume, min_dist);
if (time_start_volume >= time_exit_volume)
return;
int time_curr = time_start_volume;
int3 g = getVoxel (ray_start + ray_dir * time_curr);
g.x = max (0, min (g.x, VOLUME_X - 1));
g.y = max (0, min (g.y, VOLUME_Y - 1));
g.z = max (0, min (g.z, VOLUME_Z - 1));
float tsdf = readTsdf (g.x, g.y, g.z);
//infinite loop guard
const float max_time = 3 * (volume_size.x + volume_size.y + volume_size.z);
for (; time_curr < max_time; time_curr += time_step)
{
float tsdf_prev = tsdf;
int3 g = getVoxel ( ray_start + ray_dir * (time_curr + time_step) );
if (!checkInds (g))
break;
tsdf = readTsdf (g.x, g.y, g.z);
if (tsdf_prev < 0.f && tsdf > 0.f)
break;
if (tsdf_prev > 0.f && tsdf < 0.f) //zero crossing
{
if(hasImageError)
error.ptr(y)[x] = tsdf_prev - tsdf;
if(hasClipping)
if(!(g.x >= clippingPlane.leftX && g.x <= clippingPlane.rightX && g.y >= clippingPlane.upY && g.y <= clippingPlane.downY
&& g.z >= clippingPlane.frontZ && g.z <= clippingPlane.backZ))
clippingPlane.clippedRegion[y * cols + x] = 255;
float Ftdt = interpolateTrilineary (ray_start, ray_dir, time_curr + time_step);
if (isnan (Ftdt))
break;
float Ft = interpolateTrilineary (ray_start, ray_dir, time_curr);
if (isnan (Ft))
break;
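          // Locate the zero crossing by linear interpolation of the TSDF along the ray:
          // with Ft = F(t) > 0 and Ftdt = F(t + dt) < 0, the interpolated field vanishes at
          //   Ts = t - dt * Ft / (Ftdt - Ft).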
//float Ts = time_curr - time_step * Ft/(Ftdt - Ft);
float Ts = time_curr - time_step * Ft / (Ftdt - Ft);
float3 vetex_found = ray_start + ray_dir * Ts;
vmap.ptr (y )[x] = vetex_found.x;
vmap.ptr (y + rows)[x] = vetex_found.y;
vmap.ptr (y + 2 * rows)[x] = vetex_found.z;
int3 g = getVoxel ( ray_start + ray_dir * time_curr );
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
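            // The surface normal is the normalized gradient of the TSDF, estimated by
            // central differences with a step of a quarter cell around the extracted vertex.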
float3 t;
float3 n;
t = vetex_found;
t.x += cell_size.x / 4;
float Fx1 = interpolateTrilineary (t);
t = vetex_found;
t.x -= cell_size.x / 4;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = vetex_found;
t.y += cell_size.y / 4;
float Fy1 = interpolateTrilineary (t);
t = vetex_found;
t.y -= cell_size.y / 4;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = vetex_found;
t.z += cell_size.z / 4;
float Fz1 = interpolateTrilineary (t);
t = vetex_found;
t.z -= cell_size.z / 4;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
nmap.ptr (y )[x] = n.x;
nmap.ptr (y + rows)[x] = n.y;
nmap.ptr (y + 2 * rows)[x] = n.z;
}
break;
}
} /* for(;;) */
}
};
__global__ void
rayCastKernel (const RayCaster rc) {
rc ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
float tranc_dist, const float3& volume_size,
const PtrStep<volume_elem_type>& volume, MapArr& vmap, MapArr& nmap)
{
RayCaster rc;
rc.Rcurr = Rcurr;
rc.tcurr = tcurr;
rc.time_step = tranc_dist * 0.8f;
rc.volume_size = volume_size;
rc.cell_size.x = volume_size.x / VOLUME_X;
rc.cell_size.y = volume_size.y / VOLUME_Y;
rc.cell_size.z = volume_size.z / VOLUME_Z;
rc.cols = vmap.cols ();
rc.rows = vmap.rows () / 3;
rc.intr = intr;
rc.volume = volume;
rc.vmap = vmap;
rc.nmap = nmap;
rc.hasImageError = false;
rc.hasClipping = false;
dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y);
dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y));
rayCastKernel << < grid, block >> > (rc);
cudaSafeCall (cudaGetLastError ());
//cudaSafeCall(cudaDeviceSynchronize());
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
float tranc_dist, const float3& volume_size,
const PtrStep<volume_elem_type>& volume, MapArr& vmap, MapArr& nmap, DeviceArray2D<float>& error)
{
RayCaster rc;
rc.Rcurr = Rcurr;
rc.tcurr = tcurr;
rc.time_step = tranc_dist * 0.8f;
rc.volume_size = volume_size;
rc.cell_size.x = volume_size.x / VOLUME_X;
rc.cell_size.y = volume_size.y / VOLUME_Y;
rc.cell_size.z = volume_size.z / VOLUME_Z;
rc.cols = vmap.cols ();
rc.rows = vmap.rows () / 3;
rc.intr = intr;
rc.volume = volume;
rc.vmap = vmap;
rc.nmap = nmap;
rc.error = error;
rc.hasImageError = true;
rc.hasClipping = false;
dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y);
dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y));
rayCastKernel << < grid, block >> > (rc);
cudaSafeCall (cudaGetLastError ());
//cudaSafeCall(cudaDeviceSynchronize());
}
void
pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr,
float tranc_dist, const float3& volume_size,
const PtrStep<volume_elem_type>& volume, MapArr& vmap, MapArr& nmap,
ClippingPlane& clipPlane)
{
RayCaster rc;
rc.Rcurr = Rcurr;
rc.tcurr = tcurr;
rc.time_step = tranc_dist * 0.6f;
rc.volume_size = volume_size;
rc.cell_size.x = volume_size.x / VOLUME_X;
rc.cell_size.y = volume_size.y / VOLUME_Y;
rc.cell_size.z = volume_size.z / VOLUME_Z;
rc.cols = vmap.cols ();
rc.rows = vmap.rows () / 3;
rc.intr = intr;
rc.volume = volume;
rc.vmap = vmap;
rc.nmap = nmap;
rc.hasImageError = false;
rc.hasClipping = true;
rc.clippingPlane = clipPlane;
dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y);
dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y));
rayCastKernel << < grid, block >> > (rc);
cudaSafeCall (cudaGetLastError ());
} |
8ba73eea34db0e1dc851390e7cb4195ab5b4d9bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include "template.hu"
#define BLOCK_SIZE 512
#define TILE_SIZE 2048
#define BLOCK_SIZE_CIRCULAR 512
#define TILE_SIZE_CIRCULAR 2048
// Ceiling function for X / Y.
__host__ __device__ static inline int ceil_div(int x, int y) {
return (x - 1) / y + 1;
}
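// For example, ceil_div(7, 2) == 4 and ceil_div(8, 2) == 4, so a partial tile still gets a full block.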
/******************************************************************************
GPU kernels
*******************************************************************************/
/*
* Sequential merge implementation is given. You can use it in your kernels.
*/
__device__ void merge_sequential(float* A, int A_len, float* B, int B_len, float* C) {
int i = 0, j = 0, k = 0;
while ((i < A_len) && (j < B_len)) {
C[k++] = A[i] <= B[j] ? A[i++] : B[j++];
}
if (i == A_len) {
while (j < B_len) {
C[k++] = B[j++];
}
} else {
while (i < A_len) {
C[k++] = A[i++];
}
}
}
__device__ void merge_sequential_circular(float*A, int m, float*B, int n, float* C, int A_S_start, int B_S_start){
int i = 0, j = 0, k = 0;
while(i < m && j < n){
int i_cir = (A_S_start + i) % TILE_SIZE_CIRCULAR;
int j_cir = (B_S_start + j) % TILE_SIZE_CIRCULAR;
if(A[i_cir] <= B[j_cir]){
C[k++] = A[i_cir];
i++;
}
else{
C[k++] = B[j_cir];
j++;
}
}
if(i==m){
for(; j < n; j++){
int j_cir = (B_S_start + j) % TILE_SIZE_CIRCULAR;
C[k++] = B[j_cir];
}
}
else{
for(; i < m; i++){
int i_cir = (A_S_start + i) % TILE_SIZE_CIRCULAR;
C[k++] = A[i_cir];
}
}
}
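// Indices into A and B wrap modulo TILE_SIZE_CIRCULAR (A_S_start / B_S_start mark the logical
// start of each tile), so a partially consumed tile can be refilled in place without
// shifting the leftover elements to the front of the buffer.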
__device__ int co_rank(int k, float *A, int m, float *B, int n) {
int i = k < m?k:m;
int j = k - i;
int i_low = 0>(k-n)?0:k-n;
int j_low = 0>(k-m)?0:k-m;
int delta;
bool active=true;
while(active){
if (i > 0 && j < n && A[i-1] > B[j]){
delta = ((i - i_low +1) >> 1);
j_low = j;
j += delta;
i -= delta;
}
else if(j > 0 && i < m && B[j-1] >= A[i]){
delta = ((j - j_low +1) >> 1);
i_low = i;
i = i + delta;
j = j - delta;
}
else{
active = false;
}
}
return i;
}
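/*
 * co_rank(k, A, m, B, n) returns the number i of elements taken from A (with j = k - i taken
 * from B) that form the first k elements of the merged output; the loop binary-searches for
 * the split point satisfying A[i-1] <= B[j] and B[j-1] < A[i].
 * Worked example: A = {1,3,5,7}, B = {2,4,6,8}, k = 4. The first four merged elements are
 * {1,2,3,4}: two from A (1,3) and two from B (2,4), so co_rank returns i = 2.
 */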
__device__ int co_rank_circular(int k, float *A, int m, float *B, int n, int A_S_start, int B_S_start){
int i = k < m?k:m;
int j = k - i;
int i_low = 0>(k-n)?0:k-n;
int j_low = 0>(k-m)?0:k-m;
int delta = 1;
bool active=true;
while(active){
int i_cir = (A_S_start+i>=TILE_SIZE_CIRCULAR)?A_S_start+i-TILE_SIZE_CIRCULAR:A_S_start+i;
int i_m_1_cir = (A_S_start+i-1>=TILE_SIZE_CIRCULAR)?A_S_start+i-TILE_SIZE_CIRCULAR-1:A_S_start+i-1;
int j_cir = (B_S_start+j>=TILE_SIZE_CIRCULAR)?B_S_start+j-TILE_SIZE_CIRCULAR:B_S_start+j;
int j_m_1_cir = (B_S_start+j-1>=TILE_SIZE_CIRCULAR)?B_S_start+j-TILE_SIZE_CIRCULAR-1:B_S_start+j-1;
if(i > 0 && j < n && A[i_m_1_cir] > B[j_cir]){
delta = ((i - i_low +1) >> 1);
j_low = j;
j += delta;
i -= delta;
} else if(j > 0 && i < m && B[j_m_1_cir] >= A[i_cir]) {
delta = ((j - j_low +1) >> 1) ;
i_low = i;
i = i + delta;
j = j - delta;
} else {
active = false;
}
}
return i;
}
/*
* Basic parallel merge kernel using co-rank function
* A, A_len - input array A and its length
* B, B_len - input array B and its length
* C - output array holding the merged elements.
* Length of C is A_len + B_len (size pre-allocated for you)
*/
__global__ void gpu_merge_basic_kernel(float* A, int A_len, float* B, int B_len, float* C) {
/* Your code here */
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int m=A_len, n=B_len;
int k_curr = tid*ceil_div(m+n,blockDim.x*gridDim.x);
int k_next = min((tid+1) * ceil_div(m+n,blockDim.x*gridDim.x), m+n);
int i_curr = co_rank(k_curr, A, m, B, n);
int i_next = co_rank(k_next, A, m, B, n);
int j_curr = k_curr - i_curr; int j_next = k_next - i_next;
merge_sequential( &A[i_curr], i_next-i_curr, &B[j_curr], j_next-j_curr, &C[k_curr] );
}
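// Each thread writes a contiguous slice of C of length ceil_div(m+n, blockDim.x*gridDim.x);
// co_rank maps the slice boundaries k_curr/k_next back to split points in A and B.
// For example, m+n = 1000000 outputs on 128 blocks of 512 threads (65536 threads total)
// gives ceil_div(1000000, 65536) = 16 elements per thread.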
/*
* Arguments are the same as gpu_merge_basic_kernel.
* In this kernel, use shared memory to increase the reuse.
*/
__global__ void gpu_merge_tiled_kernel(float* A, int A_len, float* B, int B_len, float* C) {
/* Your code here */
extern __shared__ float shareAB[];
float* A_S = &shareAB[0];
float* B_S = &shareAB[TILE_SIZE];
int m = A_len, n = B_len;
int tx = threadIdx.x;
int bx = blockIdx.x;
int C_curr = bx * ceil_div(m+n,gridDim.x);
int C_next = min((bx+1) * ceil_div(m+n, gridDim.x), m+n);
int A_curr = co_rank(C_curr, A, m, B, n);
int B_curr = C_curr - A_curr;
int A_next = co_rank(C_next, A, m, B, n);
int B_next = C_next - A_next;
__syncthreads();
int counter = 0;
int C_length = C_next - C_curr;
int A_length = A_next - A_curr;
int B_length = B_next - B_curr;
int total_iteration = ceil_div(C_length, TILE_SIZE);
int C_completed = 0;
int A_consumed = 0;
int B_consumed = 0;
while(counter < total_iteration) {
for(int i=0; i<TILE_SIZE; i+=blockDim.x){
if(i + tx < A_length - A_consumed) {
A_S[i + tx] = A[A_curr + A_consumed + i + tx];
}
}
for(int i=0; i<TILE_SIZE; i+=blockDim.x){
if(i + tx < B_length - B_consumed) {
B_S[i + tx] = B[B_curr + B_consumed + i + tx];
}
}
__syncthreads();
int c_curr = tx * (TILE_SIZE/blockDim.x);
int c_next = (tx+1) * (TILE_SIZE/blockDim.x);
c_curr = c_curr<=(C_length-C_completed)?c_curr:C_length-C_completed;
c_next = c_next<=(C_length-C_completed)?c_next:C_length-C_completed;
int a_curr = co_rank(c_curr, A_S, min(TILE_SIZE, A_length-A_consumed), B_S, min(TILE_SIZE, B_length-B_consumed));
int b_curr = c_curr - a_curr;
int a_next = co_rank(c_next, A_S, min(TILE_SIZE, A_length-A_consumed), B_S, min(TILE_SIZE, B_length-B_consumed));
int b_next = c_next - a_next;
merge_sequential(&A_S[a_curr], a_next-a_curr, &B_S[b_curr], b_next-b_curr, &C[C_curr+C_completed+c_curr]);
counter ++;
C_completed += TILE_SIZE;
A_consumed += co_rank(TILE_SIZE, A_S, TILE_SIZE, B_S, TILE_SIZE);
B_consumed = C_completed - A_consumed;
__syncthreads();
}
}
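// Each block stages up to TILE_SIZE elements of A and TILE_SIZE elements of B in dynamic
// shared memory: 2 * 2048 * sizeof(float) = 16 KB, which is exactly the shemm_size passed
// at launch by gpu_tiled_merge below.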
/*
* gpu_merge_circular_buffer_kernel is optional.
* The implementation will be similar to tiled merge kernel.
* You'll have to modify co-rank function and sequential_merge
* to accommodate circular buffer.
*/
__global__ void gpu_merge_circular_buffer_kernel(float* A, int A_len, float* B, int B_len, float* C) {
/* Your code here */
extern __shared__ float shareAB[];
float* A_S = &shareAB[0];
float* B_S = &shareAB[TILE_SIZE_CIRCULAR];
int tx = threadIdx.x, bx = blockIdx.x;
int A_S_start = 0;
int B_S_start = 0;
int A_S_consumed = TILE_SIZE_CIRCULAR;
int B_S_consumed = TILE_SIZE_CIRCULAR;
int m = A_len, n = B_len;
int C_curr = bx * ceil_div(m+n,gridDim.x);
int C_next = min((bx+1) * ceil_div(m+n, gridDim.x), m+n);
int A_curr = co_rank(C_curr, A, m, B, n);
int B_curr = C_curr - A_curr;
int A_next = co_rank(C_next, A, m, B, n);
int B_next = C_next - A_next;
__syncthreads();
int counter = 0;
int C_length = C_next - C_curr;
int A_length = A_next - A_curr;
int B_length = B_next - B_curr;
int total_iteration = ceil_div(C_length, TILE_SIZE_CIRCULAR);
int C_completed = 0;
int A_consumed = 0;
int B_consumed = 0;
while(counter < total_iteration) {
for(int i = 0; i < A_S_consumed; i += blockDim.x){
if(i + tx < A_length-A_consumed && i + tx < A_S_consumed){
int load_index = A_S_start + (TILE_SIZE_CIRCULAR - A_S_consumed) + i + tx;
load_index %= TILE_SIZE_CIRCULAR;
A_S[load_index] = A[A_curr + A_consumed + i + tx];
}
}
for(int i = 0; i < B_S_consumed; i += blockDim.x){
if(i + tx < B_length-B_consumed && i + tx < B_S_consumed){
int load_index = B_S_start + (TILE_SIZE_CIRCULAR - B_S_consumed) + i + tx;
load_index %= TILE_SIZE_CIRCULAR;
B_S[load_index] = B[B_curr + B_consumed + i + tx];
}
}
__syncthreads();
int c_curr = tx * (TILE_SIZE_CIRCULAR/blockDim.x);
int c_next = (tx+1) * (TILE_SIZE_CIRCULAR/blockDim.x);
c_curr = c_curr<=(C_length-C_completed)?c_curr:C_length-C_completed;
c_next = c_next<=(C_length-C_completed)?c_next:C_length-C_completed;
int a_curr = co_rank_circular(c_curr, A_S, min(TILE_SIZE_CIRCULAR, A_length-A_consumed), B_S, min(TILE_SIZE_CIRCULAR, B_length-B_consumed),A_S_start,B_S_start);
int b_curr = c_curr - a_curr;
int a_next = co_rank_circular(c_next, A_S, min(TILE_SIZE_CIRCULAR, A_length-A_consumed), B_S, min(TILE_SIZE_CIRCULAR, B_length-B_consumed),A_S_start,B_S_start);
int b_next = c_next - a_next;
merge_sequential_circular(A_S, a_next-a_curr, B_S, b_next-b_curr, &C[C_curr+C_completed+c_curr], A_S_start+a_curr, B_S_start+b_curr);
A_S_consumed = co_rank_circular(min(TILE_SIZE_CIRCULAR, C_length-C_completed), A_S, min(TILE_SIZE_CIRCULAR, A_length-A_consumed), B_S, min(TILE_SIZE_CIRCULAR, B_length-B_consumed), A_S_start, B_S_start);
B_S_consumed = min(TILE_SIZE_CIRCULAR, C_length-C_completed) - A_S_consumed;
A_consumed += A_S_consumed;
C_completed += min(TILE_SIZE_CIRCULAR, C_length-C_completed);
B_consumed = C_completed - A_consumed;
A_S_start += A_S_consumed;
if(A_S_start >= TILE_SIZE_CIRCULAR){
A_S_start -= TILE_SIZE_CIRCULAR;
}
B_S_start += B_S_consumed;
if(B_S_start >= TILE_SIZE_CIRCULAR){
B_S_start -= TILE_SIZE_CIRCULAR;
}
counter += 1;
__syncthreads();
}
}
/******************************************************************************
Functions
*******************************************************************************/
void gpu_basic_merge(float* A, int A_len, float* B, int B_len, float* C) {
const int numBlocks = 128;
hipLaunchKernelGGL(( gpu_merge_basic_kernel), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, A, A_len, B, B_len, C);
}
void gpu_tiled_merge(float* A, int A_len, float* B, int B_len, float* C) {
const int numBlocks = 128;
size_t shemm_size;
shemm_size = 2*TILE_SIZE*sizeof(float);
hipLaunchKernelGGL(( gpu_merge_tiled_kernel), dim3(numBlocks), dim3(BLOCK_SIZE), shemm_size, 0, A, A_len, B, B_len, C);
}
void gpu_circular_buffer_merge(float* A, int A_len, float* B, int B_len, float* C) {
const int numBlocks = 128;
size_t shemm_size;
shemm_size = 2*TILE_SIZE_CIRCULAR*sizeof(float);
hipLaunchKernelGGL(( gpu_merge_circular_buffer_kernel), dim3(numBlocks), dim3(BLOCK_SIZE_CIRCULAR), shemm_size, 0, A, A_len, B, B_len, C);
}
| 8ba73eea34db0e1dc851390e7cb4195ab5b4d9bd.cu | #include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include "template.hu"
#define BLOCK_SIZE 512
#define TILE_SIZE 2048
#define BLOCK_SIZE_CIRCULAR 512
#define TILE_SIZE_CIRCULAR 2048
// Ceiling function for X / Y.
__host__ __device__ static inline int ceil_div(int x, int y) {
return (x - 1) / y + 1;
}
/******************************************************************************
GPU kernels
*******************************************************************************/
/*
* Sequential merge implementation is given. You can use it in your kernels.
*/
__device__ void merge_sequential(float* A, int A_len, float* B, int B_len, float* C) {
int i = 0, j = 0, k = 0;
while ((i < A_len) && (j < B_len)) {
C[k++] = A[i] <= B[j] ? A[i++] : B[j++];
}
if (i == A_len) {
while (j < B_len) {
C[k++] = B[j++];
}
} else {
while (i < A_len) {
C[k++] = A[i++];
}
}
}
__device__ void merge_sequential_circular(float*A, int m, float*B, int n, float* C, int A_S_start, int B_S_start){
int i = 0, j = 0, k = 0;
while(i < m && j < n){
int i_cir = (A_S_start + i) % TILE_SIZE_CIRCULAR;
int j_cir = (B_S_start + j) % TILE_SIZE_CIRCULAR;
if(A[i_cir] <= B[j_cir]){
C[k++] = A[i_cir];
i++;
}
else{
C[k++] = B[j_cir];
j++;
}
}
if(i==m){
for(; j < n; j++){
int j_cir = (B_S_start + j) % TILE_SIZE_CIRCULAR;
C[k++] = B[j_cir];
}
}
else{
for(; i < m; i++){
int i_cir = (A_S_start + i) % TILE_SIZE_CIRCULAR;
C[k++] = A[i_cir];
}
}
}
__device__ int co_rank(int k, float *A, int m, float *B, int n) {
int i = k < m?k:m;
int j = k - i;
int i_low = 0>(k-n)?0:k-n;
int j_low = 0>(k-m)?0:k-m;
int delta;
bool active=true;
while(active){
if (i > 0 && j < n && A[i-1] > B[j]){
delta = ((i - i_low +1) >> 1);
j_low = j;
j += delta;
i -= delta;
}
else if(j > 0 && i < m && B[j-1] >= A[i]){
delta = ((j - j_low +1) >> 1);
i_low = i;
i = i + delta;
j = j - delta;
}
else{
active = false;
}
}
return i;
}
__device__ int co_rank_circular(int k, float *A, int m, float *B, int n, int A_S_start, int B_S_start){
int i = k < m?k:m;
int j = k - i;
int i_low = 0>(k-n)?0:k-n;
int j_low = 0>(k-m)?0:k-m;
int delta = 1;
bool active=true;
while(active){
int i_cir = (A_S_start+i>=TILE_SIZE_CIRCULAR)?A_S_start+i-TILE_SIZE_CIRCULAR:A_S_start+i;
int i_m_1_cir = (A_S_start+i-1>=TILE_SIZE_CIRCULAR)?A_S_start+i-TILE_SIZE_CIRCULAR-1:A_S_start+i-1;
int j_cir = (B_S_start+j>=TILE_SIZE_CIRCULAR)?B_S_start+j-TILE_SIZE_CIRCULAR:B_S_start+j;
int j_m_1_cir = (B_S_start+j-1>=TILE_SIZE_CIRCULAR)?B_S_start+j-TILE_SIZE_CIRCULAR-1:B_S_start+j-1;
if(i > 0 && j < n && A[i_m_1_cir] > B[j_cir]){
delta = ((i - i_low +1) >> 1);
j_low = j;
j += delta;
i -= delta;
} else if(j > 0 && i < m && B[j_m_1_cir] >= A[i_cir]) {
delta = ((j - j_low +1) >> 1) ;
i_low = i;
i = i + delta;
j = j - delta;
} else {
active = false;
}
}
return i;
}
/*
* Basic parallel merge kernel using co-rank function
* A, A_len - input array A and its length
* B, B_len - input array B and its length
* C - output array holding the merged elements.
* Length of C is A_len + B_len (size pre-allocated for you)
*/
__global__ void gpu_merge_basic_kernel(float* A, int A_len, float* B, int B_len, float* C) {
/* Your code here */
int tid = blockIdx.x*blockDim.x + threadIdx.x;
int m=A_len, n=B_len;
int k_curr = tid*ceil_div(m+n,blockDim.x*gridDim.x);
int k_next = min((tid+1) * ceil_div(m+n,blockDim.x*gridDim.x), m+n);
int i_curr = co_rank(k_curr, A, m, B, n);
int i_next = co_rank(k_next, A, m, B, n);
int j_curr = k_curr - i_curr; int j_next = k_next - i_next;
merge_sequential( &A[i_curr], i_next-i_curr, &B[j_curr], j_next-j_curr, &C[k_curr] );
}
/*
* Arguments are the same as gpu_merge_basic_kernel.
* In this kernel, use shared memory to increase the reuse.
*/
__global__ void gpu_merge_tiled_kernel(float* A, int A_len, float* B, int B_len, float* C) {
/* Your code here */
extern __shared__ float shareAB[];
float* A_S = &shareAB[0];
float* B_S = &shareAB[TILE_SIZE];
int m = A_len, n = B_len;
int tx = threadIdx.x;
int bx = blockIdx.x;
int C_curr = bx * ceil_div(m+n,gridDim.x);
int C_next = min((bx+1) * ceil_div(m+n, gridDim.x), m+n);
int A_curr = co_rank(C_curr, A, m, B, n);
int B_curr = C_curr - A_curr;
int A_next = co_rank(C_next, A, m, B, n);
int B_next = C_next - A_next;
__syncthreads();
int counter = 0;
int C_length = C_next - C_curr;
int A_length = A_next - A_curr;
int B_length = B_next - B_curr;
int total_iteration = ceil_div(C_length, TILE_SIZE);
int C_completed = 0;
int A_consumed = 0;
int B_consumed = 0;
while(counter < total_iteration) {
for(int i=0; i<TILE_SIZE; i+=blockDim.x){
if(i + tx < A_length - A_consumed) {
A_S[i + tx] = A[A_curr + A_consumed + i + tx];
}
}
for(int i=0; i<TILE_SIZE; i+=blockDim.x){
if(i + tx < B_length - B_consumed) {
B_S[i + tx] = B[B_curr + B_consumed + i + tx];
}
}
__syncthreads();
int c_curr = tx * (TILE_SIZE/blockDim.x);
int c_next = (tx+1) * (TILE_SIZE/blockDim.x);
c_curr = c_curr<=(C_length-C_completed)?c_curr:C_length-C_completed;
c_next = c_next<=(C_length-C_completed)?c_next:C_length-C_completed;
int a_curr = co_rank(c_curr, A_S, min(TILE_SIZE, A_length-A_consumed), B_S, min(TILE_SIZE, B_length-B_consumed));
int b_curr = c_curr - a_curr;
int a_next = co_rank(c_next, A_S, min(TILE_SIZE, A_length-A_consumed), B_S, min(TILE_SIZE, B_length-B_consumed));
int b_next = c_next - a_next;
merge_sequential(&A_S[a_curr], a_next-a_curr, &B_S[b_curr], b_next-b_curr, &C[C_curr+C_completed+c_curr]);
counter ++;
C_completed += TILE_SIZE;
A_consumed += co_rank(TILE_SIZE, A_S, TILE_SIZE, B_S, TILE_SIZE);
B_consumed = C_completed - A_consumed;
__syncthreads();
}
}
/*
* gpu_merge_circular_buffer_kernel is optional.
* The implementation will be similar to tiled merge kernel.
* You'll have to modify co-rank function and sequential_merge
* to accommodate circular buffer.
*/
__global__ void gpu_merge_circular_buffer_kernel(float* A, int A_len, float* B, int B_len, float* C) {
/* Your code here */
extern __shared__ float shareAB[];
float* A_S = &shareAB[0];
float* B_S = &shareAB[TILE_SIZE_CIRCULAR];
int tx = threadIdx.x, bx = blockIdx.x;
int A_S_start = 0;
int B_S_start = 0;
int A_S_consumed = TILE_SIZE_CIRCULAR;
int B_S_consumed = TILE_SIZE_CIRCULAR;
int m = A_len, n = B_len;
int C_curr = bx * ceil_div(m+n,gridDim.x);
int C_next = min((bx+1) * ceil_div(m+n, gridDim.x), m+n);
int A_curr = co_rank(C_curr, A, m, B, n);
int B_curr = C_curr - A_curr;
int A_next = co_rank(C_next, A, m, B, n);
int B_next = C_next - A_next;
__syncthreads();
int counter = 0;
int C_length = C_next - C_curr;
int A_length = A_next - A_curr;
int B_length = B_next - B_curr;
int total_iteration = ceil_div(C_length, TILE_SIZE_CIRCULAR);
int C_completed = 0;
int A_consumed = 0;
int B_consumed = 0;
while(counter < total_iteration) {
for(int i = 0; i < A_S_consumed; i += blockDim.x){
if(i + tx < A_length-A_consumed && i + tx < A_S_consumed){
int load_index = A_S_start + (TILE_SIZE_CIRCULAR - A_S_consumed) + i + tx;
load_index %= TILE_SIZE_CIRCULAR;
A_S[load_index] = A[A_curr + A_consumed + i + tx];
}
}
for(int i = 0; i < B_S_consumed; i += blockDim.x){
if(i + tx < B_length-B_consumed && i + tx < B_S_consumed){
int load_index = B_S_start + (TILE_SIZE_CIRCULAR - B_S_consumed) + i + tx;
load_index %= TILE_SIZE_CIRCULAR;
B_S[load_index] = B[B_curr + B_consumed + i + tx];
}
}
__syncthreads();
int c_curr = tx * (TILE_SIZE_CIRCULAR/blockDim.x);
int c_next = (tx+1) * (TILE_SIZE_CIRCULAR/blockDim.x);
c_curr = c_curr<=(C_length-C_completed)?c_curr:C_length-C_completed;
c_next = c_next<=(C_length-C_completed)?c_next:C_length-C_completed;
int a_curr = co_rank_circular(c_curr, A_S, min(TILE_SIZE_CIRCULAR, A_length-A_consumed), B_S, min(TILE_SIZE_CIRCULAR, B_length-B_consumed),A_S_start,B_S_start);
int b_curr = c_curr - a_curr;
int a_next = co_rank_circular(c_next, A_S, min(TILE_SIZE_CIRCULAR, A_length-A_consumed), B_S, min(TILE_SIZE_CIRCULAR, B_length-B_consumed),A_S_start,B_S_start);
int b_next = c_next - a_next;
merge_sequential_circular(A_S, a_next-a_curr, B_S, b_next-b_curr, &C[C_curr+C_completed+c_curr], A_S_start+a_curr, B_S_start+b_curr);
A_S_consumed = co_rank_circular(min(TILE_SIZE_CIRCULAR, C_length-C_completed), A_S, min(TILE_SIZE_CIRCULAR, A_length-A_consumed), B_S, min(TILE_SIZE_CIRCULAR, B_length-B_consumed), A_S_start, B_S_start);
B_S_consumed = min(TILE_SIZE_CIRCULAR, C_length-C_completed) - A_S_consumed;
A_consumed += A_S_consumed;
C_completed += min(TILE_SIZE_CIRCULAR, C_length-C_completed);
B_consumed = C_completed - A_consumed;
A_S_start += A_S_consumed;
if(A_S_start >= TILE_SIZE_CIRCULAR){
A_S_start -= TILE_SIZE_CIRCULAR;
}
B_S_start += B_S_consumed;
if(B_S_start >= TILE_SIZE_CIRCULAR){
B_S_start -= TILE_SIZE_CIRCULAR;
}
counter += 1;
__syncthreads();
}
}
/******************************************************************************
Functions
*******************************************************************************/
void gpu_basic_merge(float* A, int A_len, float* B, int B_len, float* C) {
const int numBlocks = 128;
gpu_merge_basic_kernel<<<numBlocks, BLOCK_SIZE>>>(A, A_len, B, B_len, C);
}
void gpu_tiled_merge(float* A, int A_len, float* B, int B_len, float* C) {
const int numBlocks = 128;
size_t shemm_size;
shemm_size = 2*TILE_SIZE*sizeof(float);
gpu_merge_tiled_kernel<<<numBlocks, BLOCK_SIZE, shemm_size>>>(A, A_len, B, B_len, C);
}
void gpu_circular_buffer_merge(float* A, int A_len, float* B, int B_len, float* C) {
const int numBlocks = 128;
size_t shemm_size;
shemm_size = 2*TILE_SIZE_CIRCULAR*sizeof(float);
gpu_merge_circular_buffer_kernel<<<numBlocks, BLOCK_SIZE_CIRCULAR, shemm_size>>>(A, A_len, B, B_len, C);
}
|
1779e352c79b160eadf2441476f414aec1568074.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
This file contains simple wrapper functions that call the CUDA kernels
*/
#include <helper_cuda.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <GL/glew.h>
#include <cuda_gl_interop.h>
#include "thrust/device_ptr.h"
#include "thrust/for_each.h"
#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
#include "particles_kernel_device.cuh"
#include "ParticleSystem.cuh"
extern "C"
{
hipArray *noiseArray;
void initCuda(bool bUseGL)
{
if (bUseGL)
{
hipGLSetGLDevice(gpuGetMaxGflopsDeviceId());
}
else
{
hipSetDevice(gpuGetMaxGflopsDeviceId());
}
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
checkCudaErrors(hipMemcpyToSymbol(params, hostParams, sizeof(SimParams)));
}
//Round a / b to nearest higher integer value
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(int n, int blockSize, int &numBlocks, int &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
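    // For example, n = 100000 elements with blockSize = 256 gives numThreads = 256 and
    // numBlocks = iDivUp(100000, 256) = 391; the last block is only partially filled.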
inline float frand()
{
return rand() / (float) RAND_MAX;
}
// create 3D texture containing random values
void createNoiseTexture(int w, int h, int d)
{
hipExtent size = make_hipExtent(w, h, d);
uint elements = (uint) size.width*size.height*size.depth;
float *volumeData = (float *)malloc(elements*4*sizeof(float));
float *ptr = volumeData;
for (uint i=0; i<elements; i++)
{
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
}
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>();
checkCudaErrors(hipMalloc3DArray(&noiseArray, &channelDesc, size));
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void *)volumeData, size.width*sizeof(float4), size.width, size.height);
copyParams.dstArray = noiseArray;
copyParams.extent = size;
copyParams.kind = hipMemcpyHostToDevice;
checkCudaErrors(hipMemcpy3D(©Params));
free(volumeData);
// set texture parameters
noiseTex.normalized = true; // access with normalized texture coordinates
noiseTex.filterMode = hipFilterModeLinear; // linear interpolation
noiseTex.addressMode[0] = hipAddressModeWrap; // wrap texture coordinates
noiseTex.addressMode[1] = hipAddressModeWrap;
noiseTex.addressMode[2] = hipAddressModeWrap;
// bind array to 3D texture
checkCudaErrors(hipBindTextureToArray(noiseTex, noiseArray, channelDesc));
}
void
integrateSystem(float4 *oldPos, float4 *newPos,
float4 *oldVel, float4 *newVel,
float deltaTime,
int numParticles)
{
thrust::device_ptr<float4> d_newPos(newPos);
thrust::device_ptr<float4> d_newVel(newVel);
thrust::device_ptr<float4> d_oldPos(oldPos);
thrust::device_ptr<float4> d_oldVel(oldVel);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_newPos, d_newVel, d_oldPos, d_oldVel)),
thrust::make_zip_iterator(thrust::make_tuple(d_newPos+numParticles, d_newVel+numParticles, d_oldPos+numParticles, d_oldVel+numParticles)),
integrate_functor(deltaTime));
}
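    // The zip iterator pairs each particle's new/old position and velocity so
    // integrate_functor can update all four arrays in a single thrust::for_each pass.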
void
calcDepth(float4 *pos,
float *keys, // output
uint *indices, // output
float3 sortVector,
int numParticles)
{
thrust::device_ptr<float4> d_pos(pos);
thrust::device_ptr<float> d_keys(keys);
thrust::device_ptr<uint> d_indices(indices);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_pos, d_keys)),
thrust::make_zip_iterator(thrust::make_tuple(d_pos+numParticles, d_keys+numParticles)),
calcDepth_functor(sortVector));
thrust::sequence(d_indices, d_indices + numParticles);
}
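    // thrust::sequence fills indices with 0..numParticles-1 so that sortParticles below can
    // turn the per-particle depth keys into a sorted permutation of particle indices.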
void sortParticles(float *sortKeys, uint *indices, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<float>(sortKeys),
thrust::device_ptr<float>(sortKeys + numParticles),
thrust::device_ptr<uint>(indices));
}
} // extern "C"
| 1779e352c79b160eadf2441476f414aec1568074.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
This file contains simple wrapper functions that call the CUDA kernels
*/
#include <helper_cuda.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <GL/glew.h>
#include <cuda_gl_interop.h>
#include "thrust/device_ptr.h"
#include "thrust/for_each.h"
#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
#include "particles_kernel_device.cuh"
#include "ParticleSystem.cuh"
extern "C"
{
cudaArray *noiseArray;
void initCuda(bool bUseGL)
{
if (bUseGL)
{
cudaGLSetGLDevice(gpuGetMaxGflopsDeviceId());
}
else
{
cudaSetDevice(gpuGetMaxGflopsDeviceId());
}
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
checkCudaErrors(cudaMemcpyToSymbol(params, hostParams, sizeof(SimParams)));
}
//Round a / b to nearest higher integer value
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(int n, int blockSize, int &numBlocks, int &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
inline float frand()
{
return rand() / (float) RAND_MAX;
}
// create 3D texture containing random values
void createNoiseTexture(int w, int h, int d)
{
cudaExtent size = make_cudaExtent(w, h, d);
uint elements = (uint) size.width*size.height*size.depth;
float *volumeData = (float *)malloc(elements*4*sizeof(float));
float *ptr = volumeData;
for (uint i=0; i<elements; i++)
{
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
*ptr++ = frand()*2.0f-1.0f;
}
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>();
checkCudaErrors(cudaMalloc3DArray(&noiseArray, &channelDesc, size));
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void *)volumeData, size.width*sizeof(float4), size.width, size.height);
copyParams.dstArray = noiseArray;
copyParams.extent = size;
copyParams.kind = cudaMemcpyHostToDevice;
checkCudaErrors(cudaMemcpy3D(©Params));
free(volumeData);
// set texture parameters
noiseTex.normalized = true; // access with normalized texture coordinates
noiseTex.filterMode = cudaFilterModeLinear; // linear interpolation
noiseTex.addressMode[0] = cudaAddressModeWrap; // wrap texture coordinates
noiseTex.addressMode[1] = cudaAddressModeWrap;
noiseTex.addressMode[2] = cudaAddressModeWrap;
// bind array to 3D texture
checkCudaErrors(cudaBindTextureToArray(noiseTex, noiseArray, channelDesc));
}
void
integrateSystem(float4 *oldPos, float4 *newPos,
float4 *oldVel, float4 *newVel,
float deltaTime,
int numParticles)
{
thrust::device_ptr<float4> d_newPos(newPos);
thrust::device_ptr<float4> d_newVel(newVel);
thrust::device_ptr<float4> d_oldPos(oldPos);
thrust::device_ptr<float4> d_oldVel(oldVel);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_newPos, d_newVel, d_oldPos, d_oldVel)),
thrust::make_zip_iterator(thrust::make_tuple(d_newPos+numParticles, d_newVel+numParticles, d_oldPos+numParticles, d_oldVel+numParticles)),
integrate_functor(deltaTime));
}
void
calcDepth(float4 *pos,
float *keys, // output
uint *indices, // output
float3 sortVector,
int numParticles)
{
thrust::device_ptr<float4> d_pos(pos);
thrust::device_ptr<float> d_keys(keys);
thrust::device_ptr<uint> d_indices(indices);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_pos, d_keys)),
thrust::make_zip_iterator(thrust::make_tuple(d_pos+numParticles, d_keys+numParticles)),
calcDepth_functor(sortVector));
thrust::sequence(d_indices, d_indices + numParticles);
}
void sortParticles(float *sortKeys, uint *indices, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<float>(sortKeys),
thrust::device_ptr<float>(sortKeys + numParticles),
thrust::device_ptr<uint>(indices));
}
} // extern "C"
|
7834bbe363c3782073d93405432f404a36aa573d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_sinpi.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_sinpi), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_sinpi), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_sinpi), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7834bbe363c3782073d93405432f404a36aa573d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_sinpi.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_sinpi<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_sinpi<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_sinpi<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4590db5f6c2a2e74c3a50226a7304a5fae82ce58.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cublas_helpers.h"
#include "rank_k_operation_profiler.h"
#include "gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
RankKOperationProfiler::RankKOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kRankK,
{
{ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"},
{ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " Rank-k Update. D = alpha * A*A^T + beta * C (symmetric) or D = alpha * A*A^H + beta * C (hermitian)";
}
/// Destructor
RankKOperationProfiler::~RankKOperationProfiler() {
}
/// Prints usage statement for the math function
void RankKOperationProfiler::print_usage(std::ostream &out) const {
out << "RankK" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void RankKOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size Syrk kernel:\n"
<< " $ cutlass_profiler --operation=rank_k --blas_mode=symmetric --n=1024 --k=128\n\n"
<< "Profile a particular problem size Herk kernel:\n"
<< " $ cutlass_profiler --operation=rank_k --blas_mode=hermitian --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=rank_k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=rank_k --accumulator-type=f16,f32\n\n"
<< "Schmoo over fill modees:\n"
<< " $ cutlass_profiler --operation=rank_k --fill_mode=lower/upper\n\n"
<< "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=rank_k --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=rank_k --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=rank_k --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=rank_k --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=rank_k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to rank_k kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=rank_k \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status RankKOperationProfiler::RankKProblem::parse(
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->k)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->n), int(this->n)}).front();
return Status::kSuccess;
}
/// Total number of bytes loaded
int64_t RankKOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const {
  // Input bytes read and output bytes written for the rank-k problem
int64_t bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
// Half matrix including the diagonal will have (N*(N+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
  // Output bytes read for the rank-k problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
}
bytes *= batch_count;
return bytes;
}
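// Worked example (assuming f16 A and f32 C): n = 1024, k = 128, beta = 0, batch_count = 1 gives
// 2 * (2 bytes * 1024 * 128) + 4 bytes * 1024 * 1025 / 2 = 524288 + 2099200 = 2623488 bytes,
// about 2.5 MiB; A is counted twice because both A and its (conjugate) transpose are read.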
/// Total number of flops computed
int64_t RankKOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const {
// FLOPs = 2 * n(n+1)k/2 [mma] + 2 * n(n+1)/2 [epilogue]
// FLOPs = n(n+1)(k + 1)
int64_t flops_ = n * (n + 1) * (k + 1);
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default: break;
}
return flops_;
}
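// Worked example: n = 1024, k = 128 gives 1024 * 1025 * 129 = 135398400 real-valued FLOPs
// (about 135.4 MFLOP); the complex and Gaussian-complex cases scale this by 4 or 3 above.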
/// Initializes a performance result
void RankKOperationProfiler::RankKProblem::initialize_result(
PerformanceResult &result,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status RankKOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
rank_k_workspace_.configuration.problem_size.m() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.n() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.k() = int(problem_.k);
rank_k_workspace_.configuration.lda = problem_.lda;
rank_k_workspace_.configuration.ldc = problem_.ldc;
rank_k_workspace_.configuration.ldd = problem_.ldc;
//rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices);
rank_k_workspace_.arguments.A = nullptr;
rank_k_workspace_.arguments.C = nullptr;
rank_k_workspace_.arguments.D = nullptr;
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments);
}
/// Initializes the performance result
void RankKOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
result.flops *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
result.flops *= 4;
break;
default: break;
}
}
/// Initializes workspace
Status RankKOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
rank_k_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.k)},
{int(problem_.lda)}
);
rank_k_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)},
1 // batch_count = 1, default
);
rank_k_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data());
rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&rank_k_workspace_.configuration,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kRankK;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool RankKOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
hipError_t result = hipDeviceSynchronize();
if (result != hipSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran, but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description());
if (cublas_satisfies(rank_k_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool RankKOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::RankKDescription const &rank_k_desc =
static_cast<library::RankKDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
hipblasStatus_t status = handle.get_cublas_create_status();
if (status != HIPBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Syrk()
//
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasRankKDispatcher rank_k_op(
rank_k_desc,
rank_k_workspace_.configuration,
rank_k_workspace_.arguments
);
if (rank_k_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = rank_k_op(handle);
// Handle errors
if (status != HIPBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*rank_k_workspace_.Computed,
*rank_k_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
rank_k_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool RankKOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 4590db5f6c2a2e74c3a50226a7304a5fae82ce58.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cublas_helpers.h"
#include "rank_k_operation_profiler.h"
#include "gpu_timer.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
RankKOperationProfiler::RankKOperationProfiler(Options const &options):
OperationProfiler(
options,
library::OperationKind::kRankK,
{
{ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"},
{ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"},
{ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"},
{ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"},
{ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"},
{ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"},
{ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"},
{ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"},
{ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"},
{ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"},
{ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"},
},
{ library::Provider::kCUBLAS}
) {
description_ = " Rank-k Update. D = alpha * A*A^T + beta * C (symmetric) or D = alpha * A*A^H + beta * C (hermitian)";
}
/// Destructor
RankKOperationProfiler::~RankKOperationProfiler() {
}
/// Prints usage statement for the math function
void RankKOperationProfiler::print_usage(std::ostream &out) const {
out << "RankK" << "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void RankKOperationProfiler::print_examples(std::ostream &out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size Syrk kernel:\n"
<< " $ cutlass_profiler --operation=rank_k --blas_mode=symmetric --n=1024 --k=128\n\n"
<< "Profile a particular problem size Herk kernel:\n"
<< " $ cutlass_profiler --operation=rank_k --blas_mode=hermitian --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=rank_k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=rank_k --accumulator-type=f16,f32\n\n"
<< "Schmoo over fill modees:\n"
<< " $ cutlass_profiler --operation=rank_k --fill_mode=lower/upper\n\n"
<< "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. For row major use, row or t):\n"
<< " $ cutlass_profiler --operation=rank_k --A=f16:column or --A=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=rank_k --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=rank_k --dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=rank_k --dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n"
<< " $ cutlass_profiler --operation=rank_k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to rank_k kernels with a quick functional test and save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=rank_k \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status RankKOperationProfiler::RankKProblem::parse(
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(
this->alpha,
operation_desc.element_epilogue,
"alpha",
problem_space,
problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(
this->beta,
operation_desc.element_epilogue,
"beta",
problem_space,
problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->n), int(this->k)}).front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->n), int(this->n)}).front();
return Status::kSuccess;
}
/// Total number of bytes loaded
int64_t RankKOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const {
// Input bytes read and output bytes written for the rank-k problem
int64_t bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k +
// Half matrix including the diagonal will have (N*(N+1))/2 elements
int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; });
// Output bytes read for the rank-k problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2;
}
bytes *= batch_count;
return bytes;
}
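// Worked example of the byte count above (illustrative, using values from the code): for an
// fp32 problem with n = 1024, k = 128 and beta == 0, the two A terms contribute
// 2 * (32 * 1024 / 8) * 128 = 1,048,576 bytes and the half-matrix write of D contributes
// (32 * 1024 / 8) * 1025 / 2 = 2,099,200 bytes, i.e. about 3.15 MB per batch;
// a non-zero beta adds one more half-matrix read of C of the same size as the D term.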
/// Total number of flops computed
int64_t RankKOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const {
// FLOPs = 2 * n(n+1)k/2 [mma] + 2 * n(n+1)/2 [epilogue]
// FLOPs = n(n+1)(k + 1)
int64_t flops_ = n * (n + 1) * (k + 1);
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default: break;
}
return flops_;
}
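// Worked example of the FLOP count above (illustrative): with n = 1024 and k = 128 the
// real-valued count is n * (n + 1) * (k + 1) = 1024 * 1025 * 129 = 135,398,400 FLOPs;
// the complex multiply-add variants scale this by 4x and the Gaussian-complex variant by 3x,
// matching the switch statement in flops().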
/// Initializes a performance result
void RankKOperationProfiler::RankKProblem::initialize_result(
PerformanceResult &result,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout));
set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode));
set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode));
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status RankKOperationProfiler::initialize_configuration(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) {
return Status::kErrorInvalidProblem;
}
Status status = problem_.parse(operation_desc, problem_space, problem);
if (status != Status::kSuccess) {
return status;
}
rank_k_workspace_.configuration.problem_size.m() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.n() = int(problem_.n);
rank_k_workspace_.configuration.problem_size.k() = int(problem_.k);
rank_k_workspace_.configuration.lda = problem_.lda;
rank_k_workspace_.configuration.ldc = problem_.ldc;
rank_k_workspace_.configuration.ldd = problem_.ldc;
//rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices);
rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices);
rank_k_workspace_.arguments.A = nullptr;
rank_k_workspace_.arguments.C = nullptr;
rank_k_workspace_.arguments.D = nullptr;
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
initialize_result_(this->model_result_, options, operation_desc, problem_space);
return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments);
}
/// Initializes the performance result
void RankKOperationProfiler::initialize_result_(
PerformanceResult &result,
Options const &options,
library::RankKDescription const &operation_desc,
ProblemSpace const &problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc, problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
result.flops *= 4;
break;
case library::MathOperationID::kMultiplyAddComplexFastF32:
result.flops *= 4;
break;
default: break;
}
}
/// Initializes workspace
Status RankKOperationProfiler::initialize_workspace(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
library::RankKDescription const &operation_desc =
static_cast<library::RankKDescription const &>(operation->description());
if (options.execution_mode != ExecutionMode::kDryRun) {
rank_k_workspace_.A = device_context.allocate_tensor(
options,
"A",
operation_desc.A.element,
operation_desc.A.layout,
{int(problem_.n), int(problem_.k)},
{int(problem_.lda)}
);
rank_k_workspace_.C = device_context.allocate_tensor(
options,
"C",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)},
1 // batch_count = 1, default
);
rank_k_workspace_.Computed = device_context.allocate_tensor(
"D",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Reference = device_context.allocate_tensor(
"Reference",
operation_desc.C.element,
operation_desc.C.layout,
{int(problem_.n), int(problem_.n)},
{int(problem_.ldc)}
);
rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data());
rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration);
rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size);
status = operation->initialize(
&rank_k_workspace_.configuration,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kRankK;
results_.back().disposition = Disposition::kNotRun;
for(auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool RankKOperationProfiler::verify_cutlass(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran, but has not yet been verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description());
if (cublas_satisfies(rank_k_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(
options,
report,
device_context,
operation,
problem_space,
problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for(auto &m : results_.back().verification_map) {
if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if(!is_any_verification_run_passed && m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if(is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool RankKOperationProfiler::verify_with_cublas_(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
#if CUTLASS_ENABLE_CUBLAS
library::RankKDescription const &rank_k_desc =
static_cast<library::RankKDescription const &>(operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublas<t>Syrk()
//
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
detail::cublasRankKDispatcher rank_k_op(
rank_k_desc,
rank_k_workspace_.configuration,
rank_k_workspace_.arguments
);
if (rank_k_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = rank_k_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors(
options,
*rank_k_workspace_.Computed,
*rank_k_workspace_.Reference
);
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) {
save_workspace(
device_context,
options,
rank_k_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
}
catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool RankKOperationProfiler::profile(
Options const &options,
PerformanceReport &report,
DeviceContext &device_context,
library::Operation const *operation,
ProblemSpace const &problem_space,
ProblemSpace::Problem const &problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing RankK arguments
rank_k_workspace_.arguments.A = rank_k_workspace_.A->data();
rank_k_workspace_.arguments.C = rank_k_workspace_.C->data();
rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data();
rank_k_workspace_.arguments.alpha = problem_.alpha.data();
rank_k_workspace_.arguments.beta = problem_.beta.data();
rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
results_.back().status = profile_cutlass_(
results_.back().runtime,
options,
operation,
&rank_k_workspace_.arguments,
rank_k_workspace_.host_workspace.data(),
rank_k_workspace_.device_workspace.data()
);
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
b909fbacc2f445911eb98324328d57b463263c27.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2019 ETH Zürich, Thomas Schöps
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "libvis/cuda/test/cholesky_solver.cuh"
#include <hip/hip_runtime.h>
#include "libvis/cuda/cholesky_solver.h"
#include "libvis/cuda/cuda_util.h"
#include "libvis/logging.h"
namespace vis {
template <int N>
__global__ void CholeskySolverKernelForNMax7(
float* H, float* b) {
__shared__ float H_shared[N * (N + 1) / 2];
__shared__ float b_shared[N];
// Read inputs from global memory into shared memory.
// Note: cannot do all reads in one step for N == 7 with only 32 threads.
if (threadIdx.x < N) {
b_shared[threadIdx.x] = b[threadIdx.x];
}
if (threadIdx.x < N * (N + 1) / 2) {
H_shared[threadIdx.x] = H[threadIdx.x];
}
__syncthreads();
// Compute the results
SolveWithParallelCholeskyForNMax7<N>(H_shared, b_shared);
// Write results from shared memory back to global memory
if (threadIdx.x < N) {
b[threadIdx.x] = b_shared[threadIdx.x];
}
if (threadIdx.x < N * (N + 1) / 2) {
H[threadIdx.x] = H_shared[threadIdx.x];
}
}
void CallCholeskySolverKernelForNMax7(int N, float* H_cuda, float* b_cuda) {
dim3 grid_dim(1, 1, 1);
dim3 block_dim(32, 1, 1);
hipStream_t stream = 0;
if (N == 1) {
hipLaunchKernelGGL(( CholeskySolverKernelForNMax7<1>), dim3(grid_dim), dim3(block_dim), 0, stream, H_cuda, b_cuda);
} else if (N == 2) {
hipLaunchKernelGGL(( CholeskySolverKernelForNMax7<2>), dim3(grid_dim), dim3(block_dim), 0, stream, H_cuda, b_cuda);
} else if (N == 3) {
hipLaunchKernelGGL(( CholeskySolverKernelForNMax7<3>), dim3(grid_dim), dim3(block_dim), 0, stream, H_cuda, b_cuda);
} else if (N == 4) {
hipLaunchKernelGGL(( CholeskySolverKernelForNMax7<4>), dim3(grid_dim), dim3(block_dim), 0, stream, H_cuda, b_cuda);
} else if (N == 5) {
hipLaunchKernelGGL(( CholeskySolverKernelForNMax7<5>), dim3(grid_dim), dim3(block_dim), 0, stream, H_cuda, b_cuda);
} else if (N == 6) {
hipLaunchKernelGGL(( CholeskySolverKernelForNMax7<6>), dim3(grid_dim), dim3(block_dim), 0, stream, H_cuda, b_cuda);
} else if (N == 7) {
hipLaunchKernelGGL(( CholeskySolverKernelForNMax7<7>), dim3(grid_dim), dim3(block_dim), 0, stream, H_cuda, b_cuda);
} else {
LOG(FATAL) << "Value of N not supported: " << N;
}
CHECK_CUDA_NO_ERROR();
}
template <int N>
__global__ void CholeskySolverKernel(
float* H, float* b) {
const unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float H_shared[N * (N + 1) / 2];
__shared__ float b_shared[N];
// Read inputs from global memory into shared memory.
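// Note on the indexing (added for clarity): threads 0..N-1 stage the N entries of b, and
// threads N..N + N*(N+1)/2 - 1 stage the packed triangular H; e.g. for N == 16, threads
// 0..15 load b and threads 16..151 load H.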
if (thread_index < N) {
b_shared[thread_index] = b[thread_index];
} else if (thread_index < N + N * (N + 1) / 2) {
H_shared[thread_index - N] = H[thread_index - N];
}
__syncthreads();
// Compute the results
SolveWithParallelCholesky<N>(H_shared, b_shared);
// Write results from shared memory back to global memory
if (thread_index < N) {
b[thread_index] = b_shared[thread_index];
} else if (thread_index < N + N * (N + 1) / 2) {
H[thread_index - N] = H_shared[thread_index - N];
}
}
void CallCholeskySolverKernel(int N, float* H_cuda, float* b_cuda) {
constexpr int kBlockDim = 256;
CHECK_GE(kBlockDim, N + N * (N + 1) / 2);
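// Sanity of the check above (illustrative): N == 8 needs 8 + 36 = 44 active threads and
// N == 16 needs 16 + 136 = 152, so a single 256-thread block can stage both b and the
// packed H in one pass, whereas the 32-thread NMax7 kernel reuses the same thread
// indices for both arrays.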
dim3 grid_dim(1, 1, 1);
dim3 block_dim(kBlockDim, 1, 1);
hipStream_t stream = 0;
if (N == 8) {
hipLaunchKernelGGL(( CholeskySolverKernel<8>), dim3(grid_dim), dim3(block_dim), 0, stream, H_cuda, b_cuda);
} else if (N == 16) {
hipLaunchKernelGGL(( CholeskySolverKernel<16>), dim3(grid_dim), dim3(block_dim), 0, stream, H_cuda, b_cuda);
} else {
LOG(FATAL) << "Value of N not supported, please add it here to fix: " << N;
}
CHECK_CUDA_NO_ERROR();
}
}
| b909fbacc2f445911eb98324328d57b463263c27.cu | // Copyright 2019 ETH Zürich, Thomas Schöps
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "libvis/cuda/test/cholesky_solver.cuh"
#include <cuda_runtime.h>
#include "libvis/cuda/cholesky_solver.h"
#include "libvis/cuda/cuda_util.h"
#include "libvis/logging.h"
namespace vis {
template <int N>
__global__ void CholeskySolverKernelForNMax7(
float* H, float* b) {
__shared__ float H_shared[N * (N + 1) / 2];
__shared__ float b_shared[N];
// Read inputs from global memory into shared memory.
// Note: cannot do all reads in one step for N == 7 with only 32 threads.
if (threadIdx.x < N) {
b_shared[threadIdx.x] = b[threadIdx.x];
}
if (threadIdx.x < N * (N + 1) / 2) {
H_shared[threadIdx.x] = H[threadIdx.x];
}
__syncthreads();
// Compute the results
SolveWithParallelCholeskyForNMax7<N>(H_shared, b_shared);
// Write results from shared memory back to global memory
if (threadIdx.x < N) {
b[threadIdx.x] = b_shared[threadIdx.x];
}
if (threadIdx.x < N * (N + 1) / 2) {
H[threadIdx.x] = H_shared[threadIdx.x];
}
}
void CallCholeskySolverKernelForNMax7(int N, float* H_cuda, float* b_cuda) {
dim3 grid_dim(1, 1, 1);
dim3 block_dim(32, 1, 1);
cudaStream_t stream = 0;
if (N == 1) {
CholeskySolverKernelForNMax7<1><<<grid_dim, block_dim, 0, stream>>>(H_cuda, b_cuda);
} else if (N == 2) {
CholeskySolverKernelForNMax7<2><<<grid_dim, block_dim, 0, stream>>>(H_cuda, b_cuda);
} else if (N == 3) {
CholeskySolverKernelForNMax7<3><<<grid_dim, block_dim, 0, stream>>>(H_cuda, b_cuda);
} else if (N == 4) {
CholeskySolverKernelForNMax7<4><<<grid_dim, block_dim, 0, stream>>>(H_cuda, b_cuda);
} else if (N == 5) {
CholeskySolverKernelForNMax7<5><<<grid_dim, block_dim, 0, stream>>>(H_cuda, b_cuda);
} else if (N == 6) {
CholeskySolverKernelForNMax7<6><<<grid_dim, block_dim, 0, stream>>>(H_cuda, b_cuda);
} else if (N == 7) {
CholeskySolverKernelForNMax7<7><<<grid_dim, block_dim, 0, stream>>>(H_cuda, b_cuda);
} else {
LOG(FATAL) << "Value of N not supported: " << N;
}
CHECK_CUDA_NO_ERROR();
}
template <int N>
__global__ void CholeskySolverKernel(
float* H, float* b) {
const unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float H_shared[N * (N + 1) / 2];
__shared__ float b_shared[N];
// Read inputs from global memory into shared memory.
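// Note on the indexing (added for clarity): threads 0..N-1 stage the N entries of b, and
// threads N..N + N*(N+1)/2 - 1 stage the packed triangular H; e.g. for N == 16, threads
// 0..15 load b and threads 16..151 load H.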
if (thread_index < N) {
b_shared[thread_index] = b[thread_index];
} else if (thread_index < N + N * (N + 1) / 2) {
H_shared[thread_index - N] = H[thread_index - N];
}
__syncthreads();
// Compute the results
SolveWithParallelCholesky<N>(H_shared, b_shared);
// Write results from shared memory back to global memory
if (thread_index < N) {
b[thread_index] = b_shared[thread_index];
} else if (thread_index < N + N * (N + 1) / 2) {
H[thread_index - N] = H_shared[thread_index - N];
}
}
void CallCholeskySolverKernel(int N, float* H_cuda, float* b_cuda) {
constexpr int kBlockDim = 256;
CHECK_GE(kBlockDim, N + N * (N + 1) / 2);
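// Sanity of the check above (illustrative): N == 8 needs 8 + 36 = 44 active threads and
// N == 16 needs 16 + 136 = 152, so a single 256-thread block can stage both b and the
// packed H in one pass, whereas the 32-thread NMax7 kernel reuses the same thread
// indices for both arrays.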
dim3 grid_dim(1, 1, 1);
dim3 block_dim(kBlockDim, 1, 1);
cudaStream_t stream = 0;
if (N == 8) {
CholeskySolverKernel<8><<<grid_dim, block_dim, 0, stream>>>(H_cuda, b_cuda);
} else if (N == 16) {
CholeskySolverKernel<16><<<grid_dim, block_dim, 0, stream>>>(H_cuda, b_cuda);
} else {
LOG(FATAL) << "Value of N not supported, please add it here to fix: " << N;
}
CHECK_CUDA_NO_ERROR();
}
}
|
7d7c8fd559cc10d4c26c6c1b051ebe412361e2a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 64
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
/*
* Build out this kernel.
*/
int val = 0;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < N)
{
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
/*
* This CPU function already works, and will run to create a solution matrix
* against which to verify your work building out the matrixMulGPU kernel.
*/
void matrixMulCPU( int * a, int * b, int * c )
{
int val = 0;
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
int main()
{
int *a, *b, *c_cpu, *c_gpu; // Allocate a solution matrix for both the CPU and the GPU operations
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
hipMallocManaged (&a, size);
hipMallocManaged (&b, size);
hipMallocManaged (&c_cpu, size);
hipMallocManaged (&c_gpu, size);
// Initialize memory; create 2D matrices
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
/*
* Assign `threads_per_block` and `number_of_blocks` 2D values
* that can be used in matrixMulGPU above.
*/
dim3 threads_per_block(16,16,1);
dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1);
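// Illustrative note: with N == 64 and 16x16 tiles this launches ((64 / 16) + 1)^2 = 25 blocks
// covering an 80x80 thread grid; the row < N && col < N guard inside matrixMulGPU simply
// discards the surplus threads.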
hipLaunchKernelGGL(( matrixMulGPU) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c_gpu );
hipDeviceSynchronize();
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
hipFree(a); hipFree(b);
hipFree( c_cpu ); hipFree( c_gpu );
}
| 7d7c8fd559cc10d4c26c6c1b051ebe412361e2a4.cu | #include <stdio.h>
#define N 64
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
/*
* Build out this kernel.
*/
int val = 0;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < N)
{
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
/*
* This CPU function already works, and will run to create a solution matrix
* against which to verify your work building out the matrixMulGPU kernel.
*/
void matrixMulCPU( int * a, int * b, int * c )
{
int val = 0;
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
int main()
{
int *a, *b, *c_cpu, *c_gpu; // Allocate a solution matrix for both the CPU and the GPU operations
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
cudaMallocManaged (&a, size);
cudaMallocManaged (&b, size);
cudaMallocManaged (&c_cpu, size);
cudaMallocManaged (&c_gpu, size);
// Initialize memory; create 2D matrices
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
/*
* Assign `threads_per_block` and `number_of_blocks` 2D values
* that can be used in matrixMulGPU above.
*/
dim3 threads_per_block(16,16,1);
dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1);
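// Illustrative note: with N == 64 and 16x16 tiles this launches ((64 / 16) + 1)^2 = 25 blocks
// covering an 80x80 thread grid; the row < N && col < N guard inside matrixMulGPU simply
// discards the surplus threads.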
matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu );
cudaDeviceSynchronize();
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
cudaFree(a); cudaFree(b);
cudaFree( c_cpu ); cudaFree( c_gpu );
}
|
68d587cacad3da5a9c93d39a70d43bbdd3de639b.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "helper_cuda.h"
#include "helper_timer.h"
#include <iostream>
#include <sys/time.h>
#include "bucketsort.cuh"
#include "mergesort.cuh"
using namespace std;
////////////////////////////////////////////////////////////////////////////////
// Size of the testset (Bitwise shift of 1 over 22 places)
////////////////////////////////////////////////////////////////////////////////
#define SIZE (1 << 22)
////////////////////////////////////////////////////////////////////////////////
// Number of tests to average over
////////////////////////////////////////////////////////////////////////////////
#define TEST 1
////////////////////////////////////////////////////////////////////////////////
// The timers for the different parts of the algo
////////////////////////////////////////////////////////////////////////////////
StopWatchInterface *uploadTimer, *downloadTimer, *bucketTimer,
*mergeTimer, *totalTimer, *cpuTimer;
////////////////////////////////////////////////////////////////////////////////
// Compare method for CPU sort
////////////////////////////////////////////////////////////////////////////////
inline int compare(const void *a, const void *b) {
if(*((float *)a) < *((float *)b)) return -1;
else if(*((float *)a) > *((float *)b)) return 1;
else return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Forward declaration
////////////////////////////////////////////////////////////////////////////////
void cudaSort(float *origList, float minimum, float maximum,
float *resultList, int numElements);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
#ifdef GPUD
#ifdef THREADED
#ifdef DRAM
#define NVMED_INIT(a) nvmed_init(6)
#define NVMED_SEND(a,b,c,d) nvmed_host_pipeline_send(a, b, c, d, NULL)
#define NVMED_RECV(a,b,c,d) nvmed_host_pipeline_recv(a,b,c,d)
#else
#define NVMED_INIT(a) nvmed_init(6)
#define NVMED_SEND(a,b,c,d) nvmed_send_threaded(a,b,c,d)
#define NVMED_RECV(a,b,c,d) nvmed_recv_threaded(a,b,c,d)
#endif
#else
#define NVMED_INIT(a) nvmed_init(a)
#define NVMED_SEND(a,b,c,d) nvmed_send(a,b,c,d)
#define NVMED_RECV(a,b,c,d) nvmed_recv(a,b,c,d)
#endif
#endif
int
main( int argc, char** argv)
{
// Create timers for each sort
struct timeval time_start, time_end, program_start, program_end;
gettimeofday(&program_start, NULL);
sdkCreateTimer(&uploadTimer);
sdkCreateTimer(&downloadTimer);
sdkCreateTimer(&bucketTimer);
sdkCreateTimer(&mergeTimer);
sdkCreateTimer(&totalTimer);
sdkCreateTimer(&cpuTimer);
int numElements = 0;
// Number of elements in the test bed
gettimeofday(&time_start, NULL);
#ifdef READING_FROM_BINARY
numElements = atoi(argv[2]);
#else
if(strcmp(argv[1],"r") ==0) {
numElements = SIZE;
}
else {
FILE *fp;
fp = fopen(argv[1],"r");
if(fp == NULL) {
cout << "Error reading file" << endl;
exit(EXIT_FAILURE);
}
int count = 0;
float c;
while(fscanf(fp,"%f",&c) != EOF) {
count++;
}
fclose(fp);
numElements = count;
}
#endif
cout << "Sorting list of " << numElements << " floats\n";
// Generate random data
// Memory space the list of random floats will take up
int mem_size = numElements * sizeof(float);
// Allocate enough for the input list
float *cpu_idata = (float *)malloc(mem_size);
// Allocate enough for the output list on the cpu side
float *cpu_odata = (float *)malloc(mem_size);
// Allocate enough memory for the output list on the gpu side
float *gpu_odata = (float *)malloc(mem_size);
float datamin = FLT_MAX;
float datamax = -FLT_MAX;
if(strcmp(argv[1],"r")==0) {
for (int i = 0; i < numElements; i++) {
// Generate random floats between 0 and 1 for the input data
cpu_idata[i] = ((float) rand() / RAND_MAX);
// Compare the value at this index to the current minimum; if it is smaller, it becomes the new minimum
datamin = min(cpu_idata[i], datamin);
//Same as above but for maximum
datamax = max(cpu_idata[i], datamax);
}
} else {
FILE *fp;
#ifdef READING_FROM_BINARY
fp = fopen(argv[1],"rb");
fread(cpu_idata,sizeof(float),numElements,fp);
fclose(fp);
gettimeofday(&time_end, NULL);
printf("FileInput %lf seconds\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))/1000000.0);
gettimeofday(&time_start, NULL);
/* for(int i = 0; i < numElements; i++) {
datamin = min(cpu_idata[i],datamin);
datamax = max(cpu_idata[i],datamax);
}
*/
datamin = 0.0;
datamax = 5.0;
// gettimeofday(&time_end, NULL);
// printf("HGProfile: CPUFindMinMax %d m:%f M:%f\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec)),datamin, datamax);
#else
fp = fopen(argv[1],"r");
for(int i = 0; i < numElements; i++) {
fscanf(fp,"%f",&cpu_idata[i]);
datamin = min(cpu_idata[i],datamin);
datamax = max(cpu_idata[i],datamax);
}
fclose(fp);
gettimeofday(&time_end, NULL);
printf("FileInput %lf seconds\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))/1000000.0);
gettimeofday(&time_start, NULL);
#endif
}
#ifndef CPU
cout << "Sorting on GPU..." << flush;
// GPU Sort
for (int i = 0; i < TEST; i++)
cudaSort(cpu_idata, datamin, datamax, gpu_odata, numElements);
#endif
#ifdef VERIFY
cout << "Sorting on CPU..." << flush;
// CPU Sort
sdkStartTimer(&cpuTimer);
memcpy(cpu_odata, cpu_idata, mem_size);
qsort(cpu_odata, numElements, sizeof(float), compare);
sdkStopTimer(&cpuTimer);
cout << "done.\n";
/* cout << "Checking result..." << flush;
// Result checking
int count = 0;
for(int i = 0; i < numElements; i++)
if(cpu_odata[i] != gpu_odata[i])
{
printf("Sort missmatch on element %d: \n", i);
printf("CPU = %f : GPU = %f\n", cpu_odata[i], gpu_odata[i]);
count++;
break;
}
if(count == 0) cout << "PASSED.\n";
else cout << "FAILED.\n";*/
#endif
// Timer report
printf("GPU iterations: %d\n", TEST);
#ifdef TIMER
#ifdef VERIFY
printf("Total CPU execution time: %lf seconds\n", sdkGetTimerValue(&cpuTimer)/1000);
#endif
#ifndef CPU
printf("Total GPU execution time: %lf seconds\n", sdkGetTimerValue(&totalTimer) /1000);
printf(" - Upload : %lf seconds\n", sdkGetTimerValue(&uploadTimer) /1000);
printf(" - Download : %lf seconds\n", sdkGetTimerValue(&downloadTimer) /1000);
printf(" - Bucket sort : %lf seconds\n", sdkGetTimerValue(&bucketTimer) /1000);
printf(" - Merge sort : %lf seconds\n", sdkGetTimerValue(&mergeTimer) /1000);
#endif
#endif
#ifdef OUTPUT
FILE *tp;
const char filename2[]="./hybridoutput.txt";
tp = fopen(filename2,"w");
for(int i = 0; i < numElements; i++) {
fprintf(tp,"%f ",cpu_idata[i]);
}
fclose(tp);
#endif
// Release memory
gettimeofday(&program_end, NULL);
printf("HGProfile: Total %lf\n",((program_end.tv_sec * 1000000 + program_end.tv_usec) - (program_start.tv_sec * 1000000 + program_start.tv_usec))-sdkGetTimerValue(&cpuTimer)/1000);
sdkDeleteTimer(&uploadTimer);
sdkDeleteTimer(&downloadTimer);
sdkDeleteTimer(&bucketTimer);
sdkDeleteTimer(&mergeTimer);
sdkDeleteTimer(&totalTimer);
sdkDeleteTimer(&cpuTimer);
#ifndef GPUD
free(cpu_idata);
#endif
free(cpu_odata); free(gpu_odata);
#ifdef GPUD
// nvmed_deinit();
#endif
}
void cudaSort(float *origList, float minimum, float maximum,
float *resultList, int numElements)
{
// Initialization and upload data
// struct timeval time_start, time_end;
float *d_input = NULL;
float *d_output = NULL;
int mem_size = (numElements + DIVISIONS * 4) * sizeof(float);
// gettimeofday(&time_start, NULL);
sdkStartTimer(&uploadTimer);
{
hipMalloc((void**) &d_output, mem_size);
#ifdef GPUD
#if TEST > 1
hipMalloc((void**) &d_input, mem_size);
hipMemcpy((void *) d_input, (void *)origList, numElements * sizeof(float),
hipMemcpyDeviceToDevice);
// hipFree(origList);
#else
d_input=origList;
#endif
#else
hipMalloc((void**) &d_input, mem_size);
hipMemcpy((void *) d_input, (void *)origList, numElements * sizeof(float),
hipMemcpyHostToDevice);
#endif
init_bucketsort(numElements);
}
// gettimeofday(&time_end, NULL);
// printf("HGProfile: cudaMemcpyHD %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec)));
sdkStopTimer(&uploadTimer);
//system("/homes/h1tseng/Gordon/PMCS/tools/powermeter/powermeter_client 0.0.0.0 \"e\" 27072");
//system("/homes/h1tseng/Gordon/PMCS/tools/powermeter/powermeter_client 0.0.0.0 \"b\" 27072");
sdkStartTimer(&totalTimer);
// Bucketsort the list
sdkStartTimer(&bucketTimer);
int *sizes = (int*) malloc(DIVISIONS * sizeof(int));
int *nullElements = (int*) malloc(DIVISIONS * sizeof(int));
unsigned int *origOffsets = (unsigned int *) malloc((DIVISIONS + 1) * sizeof(int));
bucketSort(d_input, d_output, numElements, sizes, nullElements,
minimum, maximum, origOffsets);
sdkStopTimer(&bucketTimer);
// Mergesort the result
sdkStartTimer(&mergeTimer);
float4 *d_origList = (float4*) d_output,
*d_resultList = (float4*) d_input;
int newlistsize = 0;
for(int i = 0; i < DIVISIONS; i++)
newlistsize += sizes[i] * 4;
float4 *mergeresult = runMergeSort( newlistsize, DIVISIONS, d_origList, d_resultList,
sizes, nullElements, origOffsets); //d_origList;
hipDeviceSynchronize();
sdkStopTimer(&mergeTimer);
sdkStopTimer(&totalTimer);
// fprintf(stderr, "%p %p\n",resultList, mergeresult );
// Download result
sdkStartTimer(&downloadTimer);
/* checkCudaErrors( hipMemcpy((void *) resultList,
(void *)mergeresult, numElements * sizeof(float), hipMemcpyDeviceToHost) );*/
sdkStopTimer(&downloadTimer);
// Clean up
finish_bucketsort();
//#ifdef GPUD
hipFree(d_input);
//#endif
hipFree(d_output);
free(nullElements); free(sizes);
}
| 68d587cacad3da5a9c93d39a70d43bbdd3de639b.cu | #ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "helper_cuda.h"
#include "helper_timer.h"
#include <iostream>
#include <sys/time.h>
#include "bucketsort.cuh"
#include "mergesort.cuh"
using namespace std;
////////////////////////////////////////////////////////////////////////////////
// Size of the testset (Bitwise shift of 1 over 22 places)
////////////////////////////////////////////////////////////////////////////////
#define SIZE (1 << 22)
////////////////////////////////////////////////////////////////////////////////
// Number of tests to average over
////////////////////////////////////////////////////////////////////////////////
#define TEST 1
////////////////////////////////////////////////////////////////////////////////
// The timers for the different parts of the algo
////////////////////////////////////////////////////////////////////////////////
StopWatchInterface *uploadTimer, *downloadTimer, *bucketTimer,
*mergeTimer, *totalTimer, *cpuTimer;
////////////////////////////////////////////////////////////////////////////////
// Compare method for CPU sort
////////////////////////////////////////////////////////////////////////////////
inline int compare(const void *a, const void *b) {
if(*((float *)a) < *((float *)b)) return -1;
else if(*((float *)a) > *((float *)b)) return 1;
else return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Forward declaration
////////////////////////////////////////////////////////////////////////////////
void cudaSort(float *origList, float minimum, float maximum,
float *resultList, int numElements);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
#ifdef GPUD
#ifdef THREADED
#ifdef DRAM
#define NVMED_INIT(a) nvmed_init(6)
#define NVMED_SEND(a,b,c,d) nvmed_host_pipeline_send(a, b, c, d, NULL)
#define NVMED_RECV(a,b,c,d) nvmed_host_pipeline_recv(a,b,c,d)
#else
#define NVMED_INIT(a) nvmed_init(6)
#define NVMED_SEND(a,b,c,d) nvmed_send_threaded(a,b,c,d)
#define NVMED_RECV(a,b,c,d) nvmed_recv_threaded(a,b,c,d)
#endif
#else
#define NVMED_INIT(a) nvmed_init(a)
#define NVMED_SEND(a,b,c,d) nvmed_send(a,b,c,d)
#define NVMED_RECV(a,b,c,d) nvmed_recv(a,b,c,d)
#endif
#endif
int
main( int argc, char** argv)
{
// Create timers for each sort
struct timeval time_start, time_end, program_start, program_end;
gettimeofday(&program_start, NULL);
sdkCreateTimer(&uploadTimer);
sdkCreateTimer(&downloadTimer);
sdkCreateTimer(&bucketTimer);
sdkCreateTimer(&mergeTimer);
sdkCreateTimer(&totalTimer);
sdkCreateTimer(&cpuTimer);
int numElements = 0;
// Number of elements in the test bed
gettimeofday(&time_start, NULL);
#ifdef READING_FROM_BINARY
numElements = atoi(argv[2]);
#else
if(strcmp(argv[1],"r") ==0) {
numElements = SIZE;
}
else {
FILE *fp;
fp = fopen(argv[1],"r");
if(fp == NULL) {
cout << "Error reading file" << endl;
exit(EXIT_FAILURE);
}
int count = 0;
float c;
while(fscanf(fp,"%f",&c) != EOF) {
count++;
}
fclose(fp);
numElements = count;
}
#endif
cout << "Sorting list of " << numElements << " floats\n";
// Generate random data
// Memory space the list of random floats will take up
int mem_size = numElements * sizeof(float);
// Allocate enough for the input list
float *cpu_idata = (float *)malloc(mem_size);
// Allocate enough for the output list on the cpu side
float *cpu_odata = (float *)malloc(mem_size);
// Allocate enough memory for the output list on the gpu side
float *gpu_odata = (float *)malloc(mem_size);
float datamin = FLT_MAX;
float datamax = -FLT_MAX;
if(strcmp(argv[1],"r")==0) {
for (int i = 0; i < numElements; i++) {
// Generate random floats between 0 and 1 for the input data
cpu_idata[i] = ((float) rand() / RAND_MAX);
//Compare data at index to data minimum, if less than current minimum, set that element as new minimum
datamin = min(cpu_idata[i], datamin);
//Same as above but for maximum
datamax = max(cpu_idata[i], datamax);
}
} else {
FILE *fp;
#ifdef READING_FROM_BINARY
fp = fopen(argv[1],"rb");
fread(cpu_idata,sizeof(float),numElements,fp);
fclose(fp);
gettimeofday(&time_end, NULL);
printf("FileInput %lf seconds\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))/1000000.0);
gettimeofday(&time_start, NULL);
/* for(int i = 0; i < numElements; i++) {
datamin = min(cpu_idata[i],datamin);
datamax = max(cpu_idata[i],datamax);
}
*/
datamin = 0.0;
datamax = 5.0;
// gettimeofday(&time_end, NULL);
// printf("HGProfile: CPUFindMinMax %d m:%f M:%f\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec)),datamin, datamax);
#else
fp = fopen(argv[1],"r");
for(int i = 0; i < numElements; i++) {
fscanf(fp,"%f",&cpu_idata[i]);
datamin = min(cpu_idata[i],datamin);
datamax = max(cpu_idata[i],datamax);
}
fclose(fp);
gettimeofday(&time_end, NULL);
printf("FileInput %lf seconds\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec))/1000000.0);
gettimeofday(&time_start, NULL);
#endif
}
#ifndef CPU
cout << "Sorting on GPU..." << flush;
// GPU Sort
for (int i = 0; i < TEST; i++)
cudaSort(cpu_idata, datamin, datamax, gpu_odata, numElements);
#endif
#ifdef VERIFY
cout << "Sorting on CPU..." << flush;
// CPU Sort
sdkStartTimer(&cpuTimer);
memcpy(cpu_odata, cpu_idata, mem_size);
qsort(cpu_odata, numElements, sizeof(float), compare);
sdkStopTimer(&cpuTimer);
cout << "done.\n";
/* cout << "Checking result..." << flush;
// Result checking
int count = 0;
for(int i = 0; i < numElements; i++)
if(cpu_odata[i] != gpu_odata[i])
{
printf("Sort missmatch on element %d: \n", i);
printf("CPU = %f : GPU = %f\n", cpu_odata[i], gpu_odata[i]);
count++;
break;
}
if(count == 0) cout << "PASSED.\n";
else cout << "FAILED.\n";*/
#endif
// Timer report
printf("GPU iterations: %d\n", TEST);
#ifdef TIMER
#ifdef VERIFY
printf("Total CPU execution time: %lf seconds\n", sdkGetTimerValue(&cpuTimer)/1000);
#endif
#ifndef CPU
printf("Total GPU execution time: %lf seconds\n", sdkGetTimerValue(&totalTimer) /1000);
printf(" - Upload : %lf seconds\n", sdkGetTimerValue(&uploadTimer) /1000);
printf(" - Download : %lf seconds\n", sdkGetTimerValue(&downloadTimer) /1000);
printf(" - Bucket sort : %lf seconds\n", sdkGetTimerValue(&bucketTimer) /1000);
printf(" - Merge sort : %lf seconds\n", sdkGetTimerValue(&mergeTimer) /1000);
#endif
#endif
#ifdef OUTPUT
FILE *tp;
const char filename2[]="./hybridoutput.txt";
tp = fopen(filename2,"w");
for(int i = 0; i < numElements; i++) {
fprintf(tp,"%f ",cpu_idata[i]);
}
fclose(tp);
#endif
// Release memory
gettimeofday(&program_end, NULL);
printf("HGProfile: Total %lf\n",((program_end.tv_sec * 1000000 + program_end.tv_usec) - (program_start.tv_sec * 1000000 + program_start.tv_usec))-sdkGetTimerValue(&cpuTimer)/1000);
sdkDeleteTimer(&uploadTimer);
sdkDeleteTimer(&downloadTimer);
sdkDeleteTimer(&bucketTimer);
sdkDeleteTimer(&mergeTimer);
sdkDeleteTimer(&totalTimer);
sdkDeleteTimer(&cpuTimer);
#ifndef GPUD
free(cpu_idata);
#endif
free(cpu_odata); free(gpu_odata);
#ifdef GPUD
// nvmed_deinit();
#endif
}
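/*
 * cudaSort: the GPU sorting pipeline timed above. A rough sketch of the flow, based on
 * the calls below (the kernels themselves live in bucketsort.cuh / mergesort.cuh and
 * are not shown here):
 *   1. upload the input list to the device                  (uploadTimer)
 *   2. bucketSort() scatters values into DIVISIONS buckets  (bucketTimer)
 *   3. runMergeSort() sorts the bucket contents as float4s  (mergeTimer)
 *   4. the sorted result would be copied back               (downloadTimer) -- that
 *      cudaMemcpy is currently commented out.
 */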
void cudaSort(float *origList, float minimum, float maximum,
float *resultList, int numElements)
{
// Initialization and upload data
// struct timeval time_start, time_end;
float *d_input = NULL;
float *d_output = NULL;
int mem_size = (numElements + DIVISIONS * 4) * sizeof(float);
// gettimeofday(&time_start, NULL);
sdkStartTimer(&uploadTimer);
{
cudaMalloc((void**) &d_output, mem_size);
#ifdef GPUD
#if TEST > 1
cudaMalloc((void**) &d_input, mem_size);
cudaMemcpy((void *) d_input, (void *)origList, numElements * sizeof(float),
cudaMemcpyDeviceToDevice);
// cudaFree(origList);
#else
d_input=origList;
#endif
#else
cudaMalloc((void**) &d_input, mem_size);
cudaMemcpy((void *) d_input, (void *)origList, numElements * sizeof(float),
cudaMemcpyHostToDevice);
#endif
init_bucketsort(numElements);
}
// gettimeofday(&time_end, NULL);
// printf("HGProfile: cudaMemcpyHD %d\n",((time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec)));
sdkStopTimer(&uploadTimer);
//system("/homes/h1tseng/Gordon/PMCS/tools/powermeter/powermeter_client 0.0.0.0 \"e\" 27072");
//system("/homes/h1tseng/Gordon/PMCS/tools/powermeter/powermeter_client 0.0.0.0 \"b\" 27072");
sdkStartTimer(&totalTimer);
// Bucketsort the list
sdkStartTimer(&bucketTimer);
int *sizes = (int*) malloc(DIVISIONS * sizeof(int));
int *nullElements = (int*) malloc(DIVISIONS * sizeof(int));
unsigned int *origOffsets = (unsigned int *) malloc((DIVISIONS + 1) * sizeof(int));
bucketSort(d_input, d_output, numElements, sizes, nullElements,
minimum, maximum, origOffsets);
sdkStopTimer(&bucketTimer);
// Mergesort the result
sdkStartTimer(&mergeTimer);
float4 *d_origList = (float4*) d_output,
*d_resultList = (float4*) d_input;
int newlistsize = 0;
for(int i = 0; i < DIVISIONS; i++)
newlistsize += sizes[i] * 4;
float4 *mergeresult = runMergeSort( newlistsize, DIVISIONS, d_origList, d_resultList,
sizes, nullElements, origOffsets); //d_origList;
cudaThreadSynchronize();
sdkStopTimer(&mergeTimer);
sdkStopTimer(&totalTimer);
// fprintf(stderr, "%p %p\n",resultList, mergeresult );
// Download result
sdkStartTimer(&downloadTimer);
/* checkCudaErrors( cudaMemcpy((void *) resultList,
(void *)mergeresult, numElements * sizeof(float), cudaMemcpyDeviceToHost) );*/
sdkStopTimer(&downloadTimer);
// Clean up
finish_bucketsort();
//#ifdef GPUD
cudaFree(d_input);
//#endif
cudaFree(d_output);
free(nullElements); free(sizes);
}
|
07042cf9f424e04f0fed7853b86b9918ca767766.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#define N_samp 8
#define N_col 64
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int TD_NUM, int N_sim);
void FBComb(float *y, float *Vect_F, int N_sim);
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int N_sim);
int main(int argc, char *argv[]){
float **r;
float **r_dev;
float *y;
float **H;
float **H_dev;
float **F;
float **F_dev;
float **Vect_H; // output of the H filter (first convolution)
float **Vect_H_dev;
float **Vect_Dn; // output of the down sampler
float **Vect_Dn_dev;
float **Vect_Up; // output of the up sampler
float **Vect_Up_dev;
float **Vect_F; // this is the output of the F filter (per-channel result)
float **Vect_F_dev;
float **h_Vect_F;
float *h_y;
if(argc < 4){
printf("Error input: filter length #channel #thread\n");
exit(1);
}
int N_sim = atoi(argv[1]);
int N_ch = atoi(argv[2]);
int TD_NUM = atoi(argv[3]);
printf("Baseline FilterBank #task:%d, size:%d, #thread:%d\n", N_ch, N_sim, TD_NUM);
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
hipStream_t *filter_stream;
int i, j;
double start_timer, end_timer;
filter_stream = (hipStream_t*)malloc(N_ch*sizeof(hipStream_t));
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipStreamCreate(&filter_stream[i]));
}
r = (float**)malloc(N_ch*sizeof(float*));
H = (float**)malloc(N_ch*sizeof(float*));
F = (float**)malloc(N_ch*sizeof(float*));
Vect_H = (float**)malloc(N_ch*sizeof(float*));
Vect_Dn = (float**)malloc(N_ch*sizeof(float*));
Vect_Up = (float**)malloc(N_ch*sizeof(float*));
Vect_F = (float**)malloc(N_ch*sizeof(float*));
r_dev = (float**)malloc(N_ch*sizeof(float*));
H_dev = (float**)malloc(N_ch*sizeof(float*));
F_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_H_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_Dn_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_Up_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_F_dev = (float**)malloc(N_ch*sizeof(float*));
h_Vect_F = (float**)malloc(N_ch*sizeof(float*));
/*Memory allocation*/
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipHostMalloc(&r[i], N_sim*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&r_dev[i], N_sim*sizeof(float)));
checkCudaErrors(hipHostMalloc(&H[i], N_col*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&H_dev[i], N_col*sizeof(float)));
checkCudaErrors(hipHostMalloc(&F[i], N_col*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&F_dev[i], N_col*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_H[i], N_sim*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_H_dev[i], N_sim*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_Dn[i], (N_sim/N_samp)*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_Dn_dev[i], (N_sim/N_samp)*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_Up[i], N_sim*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_Up_dev[i], N_sim*sizeof(float)));
checkCudaErrors(hipHostMalloc(&Vect_F[i], N_sim*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&Vect_F_dev[i], N_sim*sizeof(float)));
h_Vect_F[i] = (float*)malloc(N_sim * sizeof(float));
}
y = (float*)malloc(N_sim*sizeof(float));
h_y = (float*)malloc(N_sim*sizeof(float));
/*init data*/
for(i = 0; i < N_ch; i++)
for(j = 0; j < N_sim; j++){
r[i][j] = j + 0.0001;
y[j] = 0;
Vect_Up[i][j] = 0;
Vect_F[i][j] = 0;
Vect_H[i][j]=0;
h_Vect_F[i][j] = 0;
}
for(i = 0; i < N_ch; i++)
for(j = 0; j < N_col; j++){
H[i][j] = 0.0001;
F[i][j] = 0.0001;
}
// Data transfer to device
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipMemcpyAsync(r_dev[i], r[i], N_sim*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(Vect_Up_dev[i], Vect_Up[i], N_sim*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(Vect_F_dev[i], Vect_F[i], N_sim*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(Vect_H_dev[i], Vect_H[i], N_sim*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(H_dev[i], H[i], N_col*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(hipMemcpyAsync(F_dev[i], F[i], N_col*sizeof(float), hipMemcpyHostToDevice, filter_stream[i]));
}
checkCudaErrors(hipDeviceSynchronize());
end_timer = my_timer();
// task launch
start_timer = my_timer();
for(i = 0; i < N_ch; i++){
hipLaunchKernelGGL(( FBCore), dim3(1), dim3(TD_NUM), 0, filter_stream[i], r_dev[i], H_dev[i], Vect_H_dev[i], Vect_Dn_dev[i],
Vect_Up_dev[i], Vect_F_dev[i], F_dev[i], TD_NUM, N_sim);
}
checkCudaErrors(hipDeviceSynchronize());
end_timer = my_timer();
printf("The GPU Elapsed time:%f Sec.\n", end_timer - start_timer);
start_timer = my_timer();
// Data transfer back to host
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipMemcpyAsync(Vect_F[i], Vect_F_dev[i], N_sim*sizeof(float), hipMemcpyDeviceToHost, filter_stream[i]));
}
checkCudaErrors(hipDeviceSynchronize());
end_timer = my_timer();
/*Merge process*/
for(i = 0; i < N_ch; i++){
FBComb(y, Vect_F[i], N_sim);
}
printf("CPU program running\n");
/*CPU tasks*/
start_timer = my_timer();
for(i = 0; i < N_ch; i++){
h_FBCore(r[i], H[i], Vect_H[i], Vect_Dn[i], Vect_Up[i], h_Vect_F[i], F[i], N_sim);
}
end_timer = my_timer();
printf("CPU Elapsed time:%f Sec.\n", end_timer - start_timer);
/*Merge process*/
for(i = 0; i < N_ch; i++){
FBComb(h_y, h_Vect_F[i], N_sim);
}
/*Verify*/
printf("Verify\n");
long long flag = 0;
for(i = 0; i < N_sim; i++){
if(abs(h_y[i] - y[i]) > 1e-3){
printf("Error:%f, %f, %d\n", h_y[i], y[i], i);
break;
}
flag ++;
}
if(flag == N_sim) printf("Verify Successfully\n");
/*Free Memory*/
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipStreamDestroy(filter_stream[i]));
}
for(i = 0; i < N_ch; i++){
checkCudaErrors(hipHostFree(r[i]));
checkCudaErrors(hipFree(r_dev[i]));
checkCudaErrors(hipHostFree(H[i]));
checkCudaErrors(hipFree(H_dev[i]));
checkCudaErrors(hipHostFree(F[i]));
checkCudaErrors(hipFree(F_dev[i]));
checkCudaErrors(hipHostFree(Vect_H[i]));
checkCudaErrors(hipFree(Vect_H_dev[i]));
checkCudaErrors(hipHostFree(Vect_Dn[i]));
checkCudaErrors(hipFree(Vect_Dn_dev[i]));
checkCudaErrors(hipHostFree(Vect_Up[i]));
checkCudaErrors(hipFree(Vect_Up_dev[i]));
checkCudaErrors(hipHostFree(Vect_F[i]));
}
free(y);
free(r);
free(H);
free(F);
free(Vect_H);
free(Vect_Dn);
free(Vect_Up);
free(Vect_F);
free(r_dev);
free(H_dev);
free(F_dev);
free(Vect_H_dev);
free(Vect_Dn_dev);
free(Vect_Up_dev);
free(Vect_F_dev);
return 0;
}
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int N_sim){
int j, k;
//convolving H
for (j=0; j< N_sim; j++)
{
for(k = 0; k < N_col; k++){
if((j-k)>=0){
Vect_H[j] += (r[j-k]*H[k]);
}
}
}
//Down Sampling
for (j=0; j < N_sim/N_samp; j++)
Vect_Dn[j]=Vect_H[j*N_samp];
//Up Sampling
for (j=0; j < N_sim/N_samp;j++)
Vect_Up[j*N_samp]=Vect_Dn[j];
//convolving F
for (j=0; j< N_sim; j++)
{
for(k = 0; k < N_col; k++){
if((j-k)>=0){
Vect_F[j]+=(F[k]*Vect_Up[j-k]);
}
}
}
}
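/*
 * One filter-bank channel, as implemented in h_FBCore above (and mirrored by the
 * FBCore kernel below):
 *   r --(convolve with H, N_col taps)--> Vect_H --(keep every N_samp-th sample)--> Vect_Dn
 *     --(zero-stuff back to full rate)--> Vect_Up --(convolve with F)--> Vect_F
 * FBComb() then accumulates the per-channel Vect_F arrays into y.
 * Illustrative indices only: with N_samp = 8, Vect_Dn[j] = Vect_H[8*j], and Vect_Up is
 * non-zero only at indices 0, 8, 16, ...
 */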
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int TD_NUM, int N_sim){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int j, k;
//convolving H
if(tid < TD_NUM){
for (j=0; j< (N_sim/TD_NUM); j++){
for(k = 0; k < N_col; k++){
if(((j*TD_NUM+tid)-k)>=0){
Vect_H[j*TD_NUM+tid] += (r[(j*TD_NUM+tid)-k]*H[k]);
}
}
}
}
__syncthreads();
//Down Sampling
if(tid < TD_NUM)
for (j=0; j < N_sim/N_samp/TD_NUM; j++)
Vect_Dn[(j*TD_NUM+tid)]=Vect_H[(j*TD_NUM+tid)*N_samp];
//Up Sampling
if(tid < TD_NUM)
for (j=0; j < N_sim/N_samp/TD_NUM;j++)
Vect_Up[(j*TD_NUM+tid)*N_samp]=Vect_Dn[(j*TD_NUM+tid)];
__syncthreads();
//convolving F
if(tid < TD_NUM){
for (j=0; j< (N_sim/TD_NUM); j++){
for(k = 0; k < N_col; k++){
if(((j*TD_NUM+tid)-k)>=0){
Vect_F[j*TD_NUM+tid]+=(F[k]*Vect_Up[(j*TD_NUM+tid)-k]);
}
}
}
}
}
void FBComb(float *y, float *Vect_F, int N_sim){
int j;
//adding the results to the y matrix
for (j=0; j < N_sim; j++)
y[j]+=Vect_F[j];
}
| 07042cf9f424e04f0fed7853b86b9918ca767766.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#define N_samp 8
#define N_col 64
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int TD_NUM, int N_sim);
void FBComb(float *y, float *Vect_F, int N_sim);
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int N_sim);
int main(int argc, char *argv[]){
float **r;
float **r_dev;
float *y;
float **H;
float **H_dev;
float **F;
float **F_dev;
float **Vect_H; // output of the H filter (first convolution)
float **Vect_H_dev;
float **Vect_Dn; // output of the down sampler
float **Vect_Dn_dev;
float **Vect_Up; // output of the up sampler
float **Vect_Up_dev;
float **Vect_F; // this is the output of the F filter (per-channel result)
float **Vect_F_dev;
float **h_Vect_F;
float *h_y;
if(argc < 4){
printf("Error input: filter length #channel #thread\n");
exit(1);
}
int N_sim = atoi(argv[1]);
int N_ch = atoi(argv[2]);
int TD_NUM = atoi(argv[3]);
printf("Baseline FilterBank #task:%d, size:%d, #thread:%d\n", N_ch, N_sim, TD_NUM);
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
cudaStream_t *filter_stream;
int i, j;
double start_timer, end_timer;
filter_stream = (cudaStream_t*)malloc(N_ch*sizeof(cudaStream_t));
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaStreamCreate(&filter_stream[i]));
}
r = (float**)malloc(N_ch*sizeof(float*));
H = (float**)malloc(N_ch*sizeof(float*));
F = (float**)malloc(N_ch*sizeof(float*));
Vect_H = (float**)malloc(N_ch*sizeof(float*));
Vect_Dn = (float**)malloc(N_ch*sizeof(float*));
Vect_Up = (float**)malloc(N_ch*sizeof(float*));
Vect_F = (float**)malloc(N_ch*sizeof(float*));
r_dev = (float**)malloc(N_ch*sizeof(float*));
H_dev = (float**)malloc(N_ch*sizeof(float*));
F_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_H_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_Dn_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_Up_dev = (float**)malloc(N_ch*sizeof(float*));
Vect_F_dev = (float**)malloc(N_ch*sizeof(float*));
h_Vect_F = (float**)malloc(N_ch*sizeof(float*));
/*Memory allocation*/
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaHostAlloc(&r[i], N_sim*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&r_dev[i], N_sim*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&H[i], N_col*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&H_dev[i], N_col*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&F[i], N_col*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&F_dev[i], N_col*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_H[i], N_sim*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_H_dev[i], N_sim*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_Dn[i], (N_sim/N_samp)*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_Dn_dev[i], (N_sim/N_samp)*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_Up[i], N_sim*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_Up_dev[i], N_sim*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&Vect_F[i], N_sim*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&Vect_F_dev[i], N_sim*sizeof(float)));
h_Vect_F[i] = (float*)malloc(N_sim * sizeof(float));
}
y = (float*)malloc(N_sim*sizeof(float));
h_y = (float*)malloc(N_sim*sizeof(float));
/*init data*/
for(i = 0; i < N_ch; i++)
for(j = 0; j < N_sim; j++){
r[i][j] = j + 0.0001;
y[j] = 0;
Vect_Up[i][j] = 0;
Vect_F[i][j] = 0;
Vect_H[i][j]=0;
h_Vect_F[i][j] = 0;
}
for(i = 0; i < N_ch; i++)
for(j = 0; j < N_col; j++){
H[i][j] = 0.0001;
F[i][j] = 0.0001;
}
// Data transfer to device
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaMemcpyAsync(r_dev[i], r[i], N_sim*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(Vect_Up_dev[i], Vect_Up[i], N_sim*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(Vect_F_dev[i], Vect_F[i], N_sim*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(Vect_H_dev[i], Vect_H[i], N_sim*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(H_dev[i], H[i], N_col*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
checkCudaErrors(cudaMemcpyAsync(F_dev[i], F[i], N_col*sizeof(float), cudaMemcpyHostToDevice, filter_stream[i]));
}
checkCudaErrors(cudaDeviceSynchronize());
end_timer = my_timer();
// task launch
start_timer = my_timer();
for(i = 0; i < N_ch; i++){
FBCore<<<1, TD_NUM, 0, filter_stream[i]>>>(r_dev[i], H_dev[i], Vect_H_dev[i], Vect_Dn_dev[i],
Vect_Up_dev[i], Vect_F_dev[i], F_dev[i], TD_NUM, N_sim);
}
checkCudaErrors(cudaDeviceSynchronize());
end_timer = my_timer();
printf("The GPU Elapsed time:%f Sec.\n", end_timer - start_timer);
start_timer = my_timer();
// Data transfer back to host
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaMemcpyAsync(Vect_F[i], Vect_F_dev[i], N_sim*sizeof(float), cudaMemcpyDeviceToHost, filter_stream[i]));
}
checkCudaErrors(cudaDeviceSynchronize());
end_timer = my_timer();
/*Merge process*/
for(i = 0; i < N_ch; i++){
FBComb(y, Vect_F[i], N_sim);
}
printf("CPU program running\n");
/*CPU tasks*/
start_timer = my_timer();
for(i = 0; i < N_ch; i++){
h_FBCore(r[i], H[i], Vect_H[i], Vect_Dn[i], Vect_Up[i], h_Vect_F[i], F[i], N_sim);
}
end_timer = my_timer();
printf("CPU Elapsed time:%f Sec.\n", end_timer - start_timer);
/*Merge process*/
for(i = 0; i < N_ch; i++){
FBComb(h_y, h_Vect_F[i], N_sim);
}
/*Verify*/
printf("Verify\n");
long long flag = 0;
for(i = 0; i < N_sim; i++){
if(abs(h_y[i] - y[i]) > 1e-3){
printf("Error:%f, %f, %d\n", h_y[i], y[i], i);
break;
}
flag ++;
}
if(flag == N_sim) printf("Verify Successfully\n");
/*Free Memory*/
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaStreamDestroy(filter_stream[i]));
}
for(i = 0; i < N_ch; i++){
checkCudaErrors(cudaFreeHost(r[i]));
checkCudaErrors(cudaFree(r_dev[i]));
checkCudaErrors(cudaFreeHost(H[i]));
checkCudaErrors(cudaFree(H_dev[i]));
checkCudaErrors(cudaFreeHost(F[i]));
checkCudaErrors(cudaFree(F_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_H[i]));
checkCudaErrors(cudaFree(Vect_H_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_Dn[i]));
checkCudaErrors(cudaFree(Vect_Dn_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_Up[i]));
checkCudaErrors(cudaFree(Vect_Up_dev[i]));
checkCudaErrors(cudaFreeHost(Vect_F[i]));
}
free(y);
free(r);
free(H);
free(F);
free(Vect_H);
free(Vect_Dn);
free(Vect_Up);
free(Vect_F);
free(r_dev);
free(H_dev);
free(F_dev);
free(Vect_H_dev);
free(Vect_Dn_dev);
free(Vect_Up_dev);
free(Vect_F_dev);
return 0;
}
void h_FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int N_sim){
int j, k;
//convolving H
for (j=0; j< N_sim; j++)
{
for(k = 0; k < N_col; k++){
if((j-k)>=0){
Vect_H[j] += (r[j-k]*H[k]);
}
}
}
//Down Sampling
for (j=0; j < N_sim/N_samp; j++)
Vect_Dn[j]=Vect_H[j*N_samp];
//Up Sampling
for (j=0; j < N_sim/N_samp;j++)
Vect_Up[j*N_samp]=Vect_Dn[j];
//convolving F
for (j=0; j< N_sim; j++)
{
for(k = 0; k < N_col; k++){
if((j-k)>=0){
Vect_F[j]+=(F[k]*Vect_Up[j-k]);
}
}
}
}
__global__ void FBCore(float *r, float *H, float *Vect_H, float *Vect_Dn, float *Vect_Up, float *Vect_F, float *F, int TD_NUM, int N_sim){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int j, k;
//convolving H
if(tid < TD_NUM){
for (j=0; j< (N_sim/TD_NUM); j++){
for(k = 0; k < N_col; k++){
if(((j*TD_NUM+tid)-k)>=0){
Vect_H[j*TD_NUM+tid] += (r[(j*TD_NUM+tid)-k]*H[k]);
}
}
}
}
__syncthreads();
//Down Sampling
if(tid < TD_NUM)
for (j=0; j < N_sim/N_samp/TD_NUM; j++)
Vect_Dn[(j*TD_NUM+tid)]=Vect_H[(j*TD_NUM+tid)*N_samp];
//Up Sampling
if(tid < TD_NUM)
for (j=0; j < N_sim/N_samp/TD_NUM;j++)
Vect_Up[(j*TD_NUM+tid)*N_samp]=Vect_Dn[(j*TD_NUM+tid)];
__syncthreads();
//convolving F
if(tid < TD_NUM){
for (j=0; j< (N_sim/TD_NUM); j++){
for(k = 0; k < N_col; k++){
if(((j*TD_NUM+tid)-k)>=0){
Vect_F[j*TD_NUM+tid]+=(F[k]*Vect_Up[(j*TD_NUM+tid)-k]);
}
}
}
}
}
void FBComb(float *y, float *Vect_F, int N_sim){
int j;
//adding the results to the y matrix
for (j=0; j < N_sim; j++)
y[j]+=Vect_F[j];
}
|
dfa34622014e2239cc1522e849cd53d401b6ba26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T, int D>
__global__ void _RowwiseLinSpace(
const int nthreads,
const int rows,
const int cols,
const SimpleArray<double, D> start,
const SimpleArray<double, D> stop,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int i = yi % cols;
const int j = yi / cols;
if (j == rows - 1 && j > 0) {
y[yi] = stop.data[i];
} else {
y[yi] = start.data[i] +
j * ((stop.data[i] - start.data[i]) / double(rows - 1));
}
}
}
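// Both LinSpace kernels evaluate start[i] + j * (stop[i] - start[i]) / (count - 1), where
// count is rows (row-wise) or cols (col-wise), and pin the final sample to stop[i] exactly.
// Illustrative numbers only: count = 5, start = 0, stop = 1 gives 0, 0.25, 0.5, 0.75, 1.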
template <int D>
__global__ void _RowwiseLinSpace(
const int nthreads,
const int rows,
const int cols,
const SimpleArray<double, D> start,
const SimpleArray<double, D> stop,
half* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int i = yi % cols;
const int j = yi / cols;
if (j == rows - 1 && j > 0) {
y[yi] = __float2half(float(stop.data[i]));
} else {
y[yi] = __float2half(float(
start.data[i] +
j * ((stop.data[i] - start.data[i]) / double(rows - 1))));
}
}
}
template <typename T, int D>
__global__ void _ColwiseLinSpace(
const int nthreads,
const int cols,
const SimpleArray<double, D> start,
const SimpleArray<double, D> stop,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int i = yi / cols;
const int j = yi % cols;
if (j == cols - 1 && j > 0) {
y[yi] = stop.data[i];
} else {
y[yi] = start.data[i] +
j * ((stop.data[i] - start.data[i]) / double(cols - 1));
}
}
}
template <int D>
__global__ void _ColwiseLinSpace(
const int nthreads,
const int cols,
const SimpleArray<double, D> start,
const SimpleArray<double, D> stop,
half* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int i = yi / cols;
const int j = yi % cols;
if (j == cols - 1 && j > 0) {
y[yi] = __float2half(float(stop.data[i]));
} else {
y[yi] = __float2half(float(
start.data[i] +
j * ((stop.data[i] - start.data[i]) / double(cols - 1))));
}
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void LinSpace<float16, CUDAContext>(
const int rows,
const int cols,
const int axis,
const double* start,
const double* stop,
float16* y,
CUDAContext* ctx) {
CUDA_TENSOR_DIMS_CHECK((axis == 0 ? cols : rows));
const auto nthreads = rows * cols;
SimpleArray<double, CUDA_TENSOR_MAX_DIMS> Y_start;
SimpleArray<double, CUDA_TENSOR_MAX_DIMS> Y_stop;
for (int i = 0; i < (axis == 0 ? cols : rows); ++i) {
Y_start.data[i] = start[i];
Y_stop.data[i] = stop[i];
}
if (axis == 0) {
hipLaunchKernelGGL(( _RowwiseLinSpace),
dim3(CUDA_BLOCKS(nthreads)),
dim3(CUDA_THREADS),
0,
ctx->cuda_stream(),
nthreads, rows, cols, Y_start, Y_stop, reinterpret_cast<half*>(y));
} else {
hipLaunchKernelGGL(( _ColwiseLinSpace),
dim3(CUDA_BLOCKS(nthreads)),
dim3(CUDA_THREADS),
0,
ctx->cuda_stream(),
nthreads, cols, Y_start, Y_stop, reinterpret_cast<half*>(y));
}
}
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void LinSpace<T, CUDAContext>( \
const int rows, \
const int cols, \
const int axis, \
const double* start, \
const double* stop, \
T* y, \
CUDAContext* ctx) { \
CUDA_TENSOR_DIMS_CHECK((axis == 0 ? cols : rows)); \
const auto nthreads = rows * cols; \
SimpleArray<double, CUDA_TENSOR_MAX_DIMS> Y_start; \
SimpleArray<double, CUDA_TENSOR_MAX_DIMS> Y_stop; \
for (int i = 0; i < (axis == 0 ? cols : rows); ++i) { \
Y_start.data[i] = start[i]; \
Y_stop.data[i] = stop[i]; \
} \
if (axis == 0) { \
hipLaunchKernelGGL(( _RowwiseLinSpace), \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), nthreads, rows, cols, Y_start, Y_stop, y); \
} else { \
hipLaunchKernelGGL(( _ColwiseLinSpace), \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
0, \
ctx->cuda_stream(), nthreads, cols, Y_start, Y_stop, y); \
} \
}
DEFINE_KERNEL_LAUNCHER(int8_t);
DEFINE_KERNEL_LAUNCHER(uint8_t);
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
| dfa34622014e2239cc1522e849cd53d401b6ba26.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
template <typename T, int D>
__global__ void _RowwiseLinSpace(
const int nthreads,
const int rows,
const int cols,
const SimpleArray<double, D> start,
const SimpleArray<double, D> stop,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int i = yi % cols;
const int j = yi / cols;
if (j == rows - 1 && j > 0) {
y[yi] = stop.data[i];
} else {
y[yi] = start.data[i] +
j * ((stop.data[i] - start.data[i]) / double(rows - 1));
}
}
}
template <int D>
__global__ void _RowwiseLinSpace(
const int nthreads,
const int rows,
const int cols,
const SimpleArray<double, D> start,
const SimpleArray<double, D> stop,
half* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int i = yi % cols;
const int j = yi / cols;
if (j == rows - 1 && j > 0) {
y[yi] = __float2half(float(stop.data[i]));
} else {
y[yi] = __float2half(float(
start.data[i] +
j * ((stop.data[i] - start.data[i]) / double(rows - 1))));
}
}
}
template <typename T, int D>
__global__ void _ColwiseLinSpace(
const int nthreads,
const int cols,
const SimpleArray<double, D> start,
const SimpleArray<double, D> stop,
T* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int i = yi / cols;
const int j = yi % cols;
if (j == cols - 1 && j > 0) {
y[yi] = stop.data[i];
} else {
y[yi] = start.data[i] +
j * ((stop.data[i] - start.data[i]) / double(cols - 1));
}
}
}
template <int D>
__global__ void _ColwiseLinSpace(
const int nthreads,
const int cols,
const SimpleArray<double, D> start,
const SimpleArray<double, D> stop,
half* y) {
CUDA_1D_KERNEL_LOOP(yi, nthreads) {
const int i = yi / cols;
const int j = yi % cols;
if (j == cols - 1 && j > 0) {
y[yi] = __float2half(float(stop.data[i]));
} else {
y[yi] = __float2half(float(
start.data[i] +
j * ((stop.data[i] - start.data[i]) / double(cols - 1))));
}
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void LinSpace<float16, CUDAContext>(
const int rows,
const int cols,
const int axis,
const double* start,
const double* stop,
float16* y,
CUDAContext* ctx) {
CUDA_TENSOR_DIMS_CHECK((axis == 0 ? cols : rows));
const auto nthreads = rows * cols;
SimpleArray<double, CUDA_TENSOR_MAX_DIMS> Y_start;
SimpleArray<double, CUDA_TENSOR_MAX_DIMS> Y_stop;
for (int i = 0; i < (axis == 0 ? cols : rows); ++i) {
Y_start.data[i] = start[i];
Y_stop.data[i] = stop[i];
}
if (axis == 0) {
_RowwiseLinSpace<<<
CUDA_BLOCKS(nthreads),
CUDA_THREADS,
0,
ctx->cuda_stream()>>>(
nthreads, rows, cols, Y_start, Y_stop, reinterpret_cast<half*>(y));
} else {
_ColwiseLinSpace<<<
CUDA_BLOCKS(nthreads),
CUDA_THREADS,
0,
ctx->cuda_stream()>>>(
nthreads, cols, Y_start, Y_stop, reinterpret_cast<half*>(y));
}
}
#define DEFINE_KERNEL_LAUNCHER(T) \
template <> \
void LinSpace<T, CUDAContext>( \
const int rows, \
const int cols, \
const int axis, \
const double* start, \
const double* stop, \
T* y, \
CUDAContext* ctx) { \
CUDA_TENSOR_DIMS_CHECK((axis == 0 ? cols : rows)); \
const auto nthreads = rows * cols; \
SimpleArray<double, CUDA_TENSOR_MAX_DIMS> Y_start; \
SimpleArray<double, CUDA_TENSOR_MAX_DIMS> Y_stop; \
for (int i = 0; i < (axis == 0 ? cols : rows); ++i) { \
Y_start.data[i] = start[i]; \
Y_stop.data[i] = stop[i]; \
} \
if (axis == 0) { \
_RowwiseLinSpace<<< \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>(nthreads, rows, cols, Y_start, Y_stop, y); \
} else { \
_ColwiseLinSpace<<< \
CUDA_BLOCKS(nthreads), \
CUDA_THREADS, \
0, \
ctx->cuda_stream()>>>(nthreads, cols, Y_start, Y_stop, y); \
} \
}
DEFINE_KERNEL_LAUNCHER(int8_t);
DEFINE_KERNEL_LAUNCHER(uint8_t);
DEFINE_KERNEL_LAUNCHER(int);
DEFINE_KERNEL_LAUNCHER(int64_t);
DEFINE_KERNEL_LAUNCHER(float);
DEFINE_KERNEL_LAUNCHER(double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
|
1e265b063d2c22f093d566513ecc95d323ef3d20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "my_grid.cu"
#include "my_outs.cu"
#include "transfer.cu"
#include <cutil_inline.h>
#include <time.h>
#include <sys/times.h>
#include "my_kernel.cu"
#include "my_analytic.cu"
// #include "memload_kernels.cu"
/* This is the main function that runs the simulation. It takes the following parameters:
parameter 1: -device=X
X tells us which GPU to run the simulation on. -device=0 will run on the first GPU and -device=N will run on the (N+1)th GPU. If there are fewer than N GPUs then it will run on the last GPU.
parameter 2: input file (e.g. test.h5)
The input file that contains all the information needed to run the simulation
parameter 3: output file (e.g. out.h5)
Throw the outputs here.
*/
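/*
Illustrative invocation only (the executable name is a placeholder):
    ./fdtd -device=0 test.h5 out.h5
runs the simulation on the first GPU, reads the grid and output requests from test.h5,
and writes the extracted results to out.h5.
*/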
int main (int argc, char *argv[]) {
my_grid g, d_g; // grid structures, d_g is the grid structure on the device
my_out *outputs; // outputs structure, defines what simulation data we want to return to the user
int cntT; // counter variables
float t; // the time in simulation units
int percentdone, oldpdone; // so that we don't get too impatient
oldpdone = 0;
clock_t start, end; // get the processor time, another way to measure how quickly a simulation runs
tms tms_temp;
printf ("\nInitialize grid\n---\n"); // read in data from the input file
grid_initialize (&g, argv[2]);
int oo = outs_initialize (&outputs, argv[2], g); // oo tells us how many outputs the user has requested
// CUT_DEVICE_INIT(2, argv); // initialize the device
printf ("\nTransfer data to device\n---\n"); // copy over data to device
d_g = grid2device (g);
// help define how the simulation will run on the GPU
dim3 threads (B_XX, B_YY);
dim3 grid (g.xx / (B_XX), g.yy /(B_YY));
printf ("\nThread block dimensions: [%d, %d]\n", B_XX, B_YY);
printf ("Grid dimensions: [%d, %d]\n", g.xx/B_XX, g.yy/B_YY);
printf("\nBegin testing an algorithm\n---\n ");
start = times(&tms_temp);
for (cntT=0 ; cntT<g.tt; cntT++) {
t = (float)(cntT);
hipLaunchKernelGGL(( kernel_E) , dim3(grid), dim3(threads), 0, 0, d_g, t); // update E-fields
hipLaunchKernelGGL(( kernel_H) , dim3(grid), dim3(threads), 0, 0, d_g, t); // update H-fields
outs_extract (outputs, oo, g, d_g, cntT, grid, threads); // extract data, if needed, to output structures. Analytical data is also calculated and extracted in this function
percentdone = (int)(100.00*cntT/g.tt);
if (percentdone > oldpdone) {
printf("\b\b\b\b\b\b\b\b %2d done",percentdone);
fflush(stdout);
oldpdone = percentdone;
}
}
end = times(&tms_temp);
printf ("\n\n%d time units.\n", end-start);
printf ("\nStore result in hdf file\n---\n");
outs_write (outputs, oo, argv[3], g);
printf ("Freeing memory\n");
// CUDA_SAFE_CALL(hipFree( ));
// write something here!
printf("Woo hoo, all done\n");
// CUT_EXIT(argc, argv);
return 0;
}
| 1e265b063d2c22f093d566513ecc95d323ef3d20.cu | #include <stdio.h>
#include "my_grid.cu"
#include "my_outs.cu"
#include "transfer.cu"
#include <cutil_inline.h>
#include <time.h>
#include <sys/times.h>
#include "my_kernel.cu"
#include "my_analytic.cu"
// #include "memload_kernels.cu"
/* This is the main function that runs the simulation. It takes the following parameters:
parameter 1: -device=X
X tells us which GPU to run the simulation on. -device=0 will run on the first GPU and -device=N will run on the (N+1)th GPU. If there are fewer than N GPUs then it will run on the last GPU.
parameter 2: input file (e.g. test.h5)
The input file that contains all the information needed to run the simulation
parameter 3: output file (e.g. out.h5)
Throw the outputs here.
*/
int main (int argc, char *argv[]) {
my_grid g, d_g; // grid structures, d_g is the grid structure on the device
my_out *outputs; // outputs structure, defines what simulation data we want to return to the user
int cntT; // counter variables
float t; // the time in simulation units
int percentdone, oldpdone; // so that we don't get too impatient
oldpdone = 0;
clock_t start, end; // get the processor time, another way to measure how quickly a simulation runs
tms tms_temp;
printf ("\nInitialize grid\n---\n"); // read in data from the input file
grid_initialize (&g, argv[2]);
int oo = outs_initialize (&outputs, argv[2], g); // oo tells us how many outputs the user has requested
// CUT_DEVICE_INIT(2, argv); // initialize the device
printf ("\nTransfer data to device\n---\n"); // copy over data to device
d_g = grid2device (g);
// help define how the simulation will run on the GPU
dim3 threads (B_XX, B_YY);
dim3 grid (g.xx / (B_XX), g.yy /(B_YY));
printf ("\nThread block dimensions: [%d, %d]\n", B_XX, B_YY);
printf ("Grid dimensions: [%d, %d]\n", g.xx/B_XX, g.yy/B_YY);
printf("\nBegin testing an algorithm\n---\n ");
start = times(&tms_temp);
for (cntT=0 ; cntT<g.tt; cntT++) {
t = (float)(cntT);
kernel_E <<<grid, threads>>> (d_g, t); // update E-fields
kernel_H <<<grid, threads>>> (d_g, t); // update H-fields
outs_extract (outputs, oo, g, d_g, cntT, grid, threads); // extract data, if needed, to output structures. Analytical data is also calculated and extracted in this function
percentdone = (int)(100.00*cntT/g.tt);
if (percentdone > oldpdone) {
printf("\b\b\b\b\b\b\b\b %2d done",percentdone);
fflush(stdout);
oldpdone = percentdone;
}
}
end = times(&tms_temp);
printf ("\n\n%d time units.\n", end-start);
printf ("\nStore result in hdf file\n---\n");
outs_write (outputs, oo, argv[3], g);
printf ("Freeing memory\n");
// CUDA_SAFE_CALL(cudaFree( ));
// write something here!
printf("Woo hoo, all done\n");
// CUT_EXIT(argc, argv);
return 0;
}
|
cac27025792a97ec8f9f78f1070abeccde9c1d4d.hip | // !!! This is a file automatically generated by hipify!!!
#include <sys/time.h>
#include <stdio.h>
//TODO for writing to file, will be deleted
#include <stdlib.h>
//TODO: could include later
//#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
//#include "../inc/helper_cuda.h"
// time stamp function in seconds
double getTimeStamp() {
struct timeval tv ;
gettimeofday( &tv, NULL ) ;
return (double) tv.tv_usec/1000000 + tv.tv_sec ;
}
void initDataA(float* data, int nx, int ny){
int i,j;
for(i = 0; i < nx; i++){
for(j = 0; j < ny; j++){
data[i*ny + j] = (float) (i+j)/3.0;
}
}
}
void initDataB(float* data, int nx, int ny){
int i,j;
for(i = 0; i < nx; i++){
for(j = 0; j < ny; j++){
data[i*ny + j] = (float)3.14*(i+j);
}
}
}
void debugPrint(float* data, int nx, int ny){
int i,j;
for(i = 0; i < nx; i++){
for(j = 0; j < ny; j++){
printf("%f ",data[i*ny + j]);
}
printf("\n");
}
printf("\n");
}
// host side matrix addition
void h_addmat(float *A, float *B, float *C, int nx, int ny){
int i;
for(i = 0; i < nx*ny; i++){
C[i] = A[i] + B[i];
}
}
// device-side matrix addition
//__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
// // kernel code might look something like this
// // but you may want to pad the matrices and index into them accordingly
// int ix = threadIdx.x + blockIdx.x*blockDim.x ;
// int iy = threadIdx.y + blockIdx.y*blockDim.y ;
// int idx = iy*nx + ix ;
// if( (ix<nx) && (iy<ny) )
// C[idx] = A[idx] + B[idx] ;
//}
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
// kernel code might look something like this
// but you may want to pad the matrices and index into them accordingly
int ix = threadIdx.x;
int iy = threadIdx.y*blockDim.x+blockIdx.x*blockDim.x*blockDim.y;
int idx = iy + ix ;
if(idx<nx*ny){
int x = idx/ny;
int y = idx%ny;
idx = y*nx+x;
C[idx] = A[idx] + B[idx] ;
}
}
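/*
 * Index mapping used by f_addmat above, with illustrative numbers only: for block(32,32),
 * thread (threadIdx.x=3, threadIdx.y=1) in blockIdx.x=2 gets
 *   iy = 1*32 + 2*32*32 = 2080,  idx = 2080 + 3 = 2083,
 * which is then remapped via x = idx/ny, y = idx%ny, idx = y*nx + x before the addition,
 * so consecutive threads access elements nx apart in memory (a strided rather than
 * unit-stride pattern).
 */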
int main( int argc, char *argv[] ) {
// get program arguments
if( argc != 3) {
printf("Error: wrong number of args\n") ;
exit(1) ;
}
int nx = atoi( argv[1] ) ; // should check validity
int ny = atoi( argv[2] ) ; // should check validity
int noElems = nx*ny ;
int bytes = noElems * sizeof(float) ;
// but you may want to pad the matrices
// alloc memory host-side
float *h_A = (float *) malloc( bytes ) ;
float *h_B = (float *) malloc( bytes ) ;
float *h_hC = (float *) malloc( bytes ) ; // host result
float *h_dC = (float *) malloc( bytes ) ; // gpu result
// init matrices with random data
//initData( h_A, noElems ) ; initData( h_B, noElems ) ;
initDataA(h_A, nx, ny);
initDataB(h_B, nx, ny);
// alloc memory dev-side
float *d_A, *d_B, *d_C ;
hipMalloc( (void **) &d_A, bytes ) ;
hipMalloc( (void **) &d_B, bytes ) ;
hipMalloc( (void **) &d_C, bytes ) ;
double timeStampA = getTimeStamp() ;
//transfer data to dev
hipMemcpy( d_A, h_A, bytes, hipMemcpyHostToDevice ) ;
hipMemcpy( d_B, h_B, bytes, hipMemcpyHostToDevice ) ;
// note that the transfers would be twice as fast if h_A and h_B
// matrices are pinned
double timeStampB = getTimeStamp() ;
// invoke Kernel
dim3 block( 32, 32 ) ; // you will want to configure this
//int block = 64;
//int grid = (noElems + block-1)/block;
int grid = (noElems + block.x*block.y-1)/(block.x*block.y);
//dim3 grid( (nx + block.x-1)/block.x, (ny + block.y-1)/block.y ) ;
//hipDeviceProp_t GPUprop;
//hipGetDeviceProperties(&GPUprop,0);
//printf("maxgridsize x is %d\n",GPUprop.maxGridSize[0]);
//printf("noelems is %d\n",noElems);
//printf("gridx is %d\n",grid);
//printf("gridx is %d and grid y is %d\n",grid.x,grid.y);
hipLaunchKernelGGL(( f_addmat), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nx, ny ) ;
hipDeviceSynchronize() ;
double timeStampC = getTimeStamp() ;
//copy data back
hipMemcpy( h_dC, d_C, bytes, hipMemcpyDeviceToHost ) ;
double timeStampD = getTimeStamp() ;
// free GPU resources
hipFree( d_A ) ; hipFree( d_B ) ; hipFree( d_C ) ;
hipDeviceReset() ;
// check result
h_addmat( h_A, h_B, h_hC, nx, ny ) ;
// print out results
if(!memcmp(h_hC,h_dC,nx*ny*sizeof(float))){
//debugPrint(h_hC, nx, ny);
//debugPrint(h_dC, nx, ny);
FILE* fptr;
fptr = fopen("time.log","a");
fprintf(fptr,"%dX%d %.6f %.6f %.6f %.6f\n",nx, ny, timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
fclose(fptr);
printf("%.6f %.6f %.6f %.6f\n", timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
}else{
printf("Error: function failed.\n");
}
}
| cac27025792a97ec8f9f78f1070abeccde9c1d4d.cu | #include <sys/time.h>
#include <stdio.h>
//TODO for writing to file, will be deleted
#include <stdlib.h>
//TODO: could include later
//#include <device_launch_parameters.h>
#include <cuda_runtime.h>
//#include "../inc/helper_cuda.h"
// time stamp function in seconds
double getTimeStamp() {
struct timeval tv ;
gettimeofday( &tv, NULL ) ;
return (double) tv.tv_usec/1000000 + tv.tv_sec ;
}
void initDataA(float* data, int nx, int ny){
int i,j;
for(i = 0; i < nx; i++){
for(j = 0; j < ny; j++){
data[i*ny + j] = (float) (i+j)/3.0;
}
}
}
void initDataB(float* data, int nx, int ny){
int i,j;
for(i = 0; i < nx; i++){
for(j = 0; j < ny; j++){
data[i*ny + j] = (float)3.14*(i+j);
}
}
}
void debugPrint(float* data, int nx, int ny){
int i,j;
for(i = 0; i < nx; i++){
for(j = 0; j < ny; j++){
printf("%f ",data[i*ny + j]);
}
printf("\n");
}
printf("\n");
}
// host side matrix addition
void h_addmat(float *A, float *B, float *C, int nx, int ny){
int i;
for(i = 0; i < nx*ny; i++){
C[i] = A[i] + B[i];
}
}
// device-side matrix addition
//__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
// // kernel code might look something like this
// // but you may want to pad the matrices and index into them accordingly
// int ix = threadIdx.x + blockIdx.x*blockDim.x ;
// int iy = threadIdx.y + blockIdx.y*blockDim.y ;
// int idx = iy*nx + ix ;
// if( (ix<nx) && (iy<ny) )
// C[idx] = A[idx] + B[idx] ;
//}
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
// kernel code might look something like this
// but you may want to pad the matrices and index into them accordingly
int ix = threadIdx.x;
int iy = threadIdx.y*blockDim.x+blockIdx.x*blockDim.x*blockDim.y;
int idx = iy + ix ;
if(idx<nx*ny){
int x = idx/ny;
int y = idx%ny;
idx = y*nx+x;
C[idx] = A[idx] + B[idx] ;
}
}
int main( int argc, char *argv[] ) {
// get program arguments
if( argc != 3) {
printf("Error: wrong number of args\n") ;
exit(1) ;
}
int nx = atoi( argv[1] ) ; // should check validity
int ny = atoi( argv[2] ) ; // should check validity
int noElems = nx*ny ;
int bytes = noElems * sizeof(float) ;
// but you may want to pad the matrices…
// alloc memory host-side
float *h_A = (float *) malloc( bytes ) ;
float *h_B = (float *) malloc( bytes ) ;
float *h_hC = (float *) malloc( bytes ) ; // host result
float *h_dC = (float *) malloc( bytes ) ; // gpu result
// init matrices with random data
//initData( h_A, noElems ) ; initData( h_B, noElems ) ;
initDataA(h_A, nx, ny);
initDataB(h_B, nx, ny);
// alloc memory dev-side
float *d_A, *d_B, *d_C ;
cudaMalloc( (void **) &d_A, bytes ) ;
cudaMalloc( (void **) &d_B, bytes ) ;
cudaMalloc( (void **) &d_C, bytes ) ;
double timeStampA = getTimeStamp() ;
//transfer data to dev
cudaMemcpy( d_A, h_A, bytes, cudaMemcpyHostToDevice ) ;
cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice ) ;
// note that the transfers would be twice as fast if h_A and h_B
// matrices are pinned
double timeStampB = getTimeStamp() ;
// invoke Kernel
dim3 block( 32, 32 ) ; // you will want to configure this
//int block = 64;
//int grid = (noElems + block-1)/block;
int grid = (noElems + block.x*block.y-1)/(block.x*block.y);
//dim3 grid( (nx + block.x-1)/block.x, (ny + block.y-1)/block.y ) ;
//cudaDeviceProp GPUprop;
//cudaGetDeviceProperties(&GPUprop,0);
//printf("maxgridsize x is %d\n",GPUprop.maxGridSize[0]);
//printf("noelems is %d\n",noElems);
//printf("gridx is %d\n",grid);
//printf("gridx is %d and grid y is %d\n",grid.x,grid.y);
f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny ) ;
cudaDeviceSynchronize() ;
double timeStampC = getTimeStamp() ;
//copy data back
cudaMemcpy( h_dC, d_C, bytes, cudaMemcpyDeviceToHost ) ;
double timeStampD = getTimeStamp() ;
// free GPU resources
cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ;
cudaDeviceReset() ;
// check result
h_addmat( h_A, h_B, h_hC, nx, ny ) ;
// print out results
if(!memcmp(h_hC,h_dC,nx*ny*sizeof(float))){
//debugPrint(h_hC, nx, ny);
//debugPrint(h_dC, nx, ny);
FILE* fptr;
fptr = fopen("time.log","a");
fprintf(fptr,"%dX%d %.6f %.6f %.6f %.6f\n",nx, ny, timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
fclose(fptr);
printf("%.6f %.6f %.6f %.6f\n", timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
}else{
printf("Error: function failed.\n");
}
}
|
9b9376e76bc2d8819099f243a21a5eb67463b82d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********
 * This code integrates the functions performing both CBCT and multi-layer FBCT.
* The flag "FBCT" will automatically select the CBCT/FBCT branches.
*/
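/*
 * Geometry note: in the CBCT branch every ray starts from the single point source
 * (Source_x, Source_y, Source_z), whereas in the FBCT branch the source z coordinate
 * follows the detector row (Detector_Zmin + Detector_z_idx * Detector_pixel_x), so each
 * detector row forms its own in-plane fan beam. This is the vertex_x2_z selection made
 * just below.
 */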
__global__ void forward_ray_driven_3d_kernel_correction(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
float vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
float vertex_x2_z;
if (FBCT)
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x; // FBCT geometry
else
vertex_x2_z = Source_z; // CBCT geometry
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
if ( (vertex_x1_x == vertex_x2_x) || (vertex_x1_y == vertex_x2_y) ) //Note: You may rotate the angle to avoid this happening
{
d_proj_correction[proj_pixel_index] = 0.0f ;
// printf("Vertical or Horizontal line occurs! Detector_x_idx:%d, Detector_z_idx:%d/n", Detector_x_idx,Detector_z_idx);
// assert(0);
}
else // if ( (vertex_x1_x != vertex_x2_x) && (vertex_x1_y != vertex_x2_y) )
{
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still unsure here which one is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
if (fabs(vertex_x2_z - vertex_x1_z) < volumn_z*1e-6) // in case x1 and x2 are at the same z position
{
alpha_min = -MAX_infi;
alpha_max = MAX_infi;
// printf("Same horizontal plane occurs! Detector_z_idx:%d/n", Detector_z_idx);
// assert(0);
}
else
{
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
}
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
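// Illustrative numbers only: if the per-axis entry values are alpha_x_min = 0.2,
// alpha_y_min = 0.3, alpha_z_min = 0.1 and the exit values are alpha_x_max = 0.9,
// alpha_y_max = 0.8, alpha_z_max = 0.95, then alpha_min = 0.3 and alpha_max = 0.8,
// i.e. the ray overlaps the volume only for parametric values in [0.3, 0.8].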
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
d_proj_correction[proj_pixel_index] = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x > vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //if (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y > vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (fabs(vertex_x1_z-vertex_x2_z) < volumn_z*1e-6)
voxel_k = k_min-1;
else if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i or j or k should not be an integer, then its predecessor (along the ray)
while (alpha_max - alpha_c > 1e-16)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end while
if (one_ray_length < volumn_z*1e-6)
d_proj_correction[proj_pixel_index] = 0.0;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length;
// projection correction (for SART)
}
}//else if
}//else if
// __syncthreads();
}
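/* Illustrative usage sketch (added comment, not part of the original source):
 * the kernel above assigns one thread per detector pixel; threadIdx.x/blockIdx.x cover the
 * detector columns (R) and blockIdx.y covers the detector rows (Z_prj). Assuming R is a
 * multiple of the chosen block width, a host-side launch could look like:
 *
 *   dim3 block(128, 1);
 *   dim3 grid(R / 128, Z_prj);
 *   hipLaunchKernelGGL(forward_ray_driven_3d_kernel_correction, grid, block, 0, 0,
 *                      d_f, d_proj_correction, d_proj_data, sin_theta, cos_theta, command);
 *
 * The block width of 128 and the divisibility assumption are illustrative only.
 */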
__global__ void backprj_ray_driven_3d_kernel(float *d_volumn_kernel, float *d_proj_correction, float beta_temp, float sin_theta, float cos_theta, int command)
{
/*
* Reference: "Accelerating simultaneous algebraic reconstruction technique with motion compensation using CUDA-enabled GPU"
* Wai-Man Pang, CUHK
* Section: Back-projection and image update
* d_proj_correction : 2D projection correction, i.e. c(i) in the Wai-Man Pang, CUHK paper
* t_theta : projection angle
* beta_temp : lamda in the paper
* d_volumn: 3D object array
* d_volumn(j) = d_volumn(j) + beta_temp * sum_i (c(i)*w(ij)) / sum_i (w(ij)); where i is ray index, j is voxel index
*/
int Idx_voxel_x = threadIdx.x + blockIdx.x * blockDim.x;
int Idx_voxel_y = blockIdx.y;
int Idx_voxel_z = blockIdx.z;
int image_voxel_index = M * N * Idx_voxel_z + M * Idx_voxel_y + Idx_voxel_x;
//coordinate of center of each voxel in x-y-z system
float coord_voxel_x = boundary_voxel_x + volumn_x*0.5f + Idx_voxel_x * volumn_x;
float coord_voxel_y = boundary_voxel_y + volumn_y*0.5f + Idx_voxel_y * volumn_y;
float coord_voxel_z = boundary_voxel_z + volumn_z*0.5f + Idx_voxel_z * volumn_z;
/**************************************/
float coord_vertex_x=0.0f, coord_vertex_y=0.0f, coord_vertex_z=0.0f;
float coord_vertex_s=0.0f, coord_vertex_t=0.0f;
float coord_vertexOnDetector_x=0.0f, coord_vertexOnDetector_z=0.0f;
float minY = MAX_infi, minZ=MAX_infi, maxY=-MAX_infi, maxZ=-MAX_infi;
float coord_pixelOnDetector_x=0.0f, coord_pixelOnDetector_y=0.0f, coord_pixelOnDetector_z=0.0f;
float coord_source_x=0.0f, coord_source_y=0.0f, coord_source_z=0.0f;
float alpha_x_i_1=0.0f, alpha_x_i=0.0f;
float alpha_y_i_1=0.0f, alpha_y_i=0.0f;
float alpha_z_i_1=0.0f, alpha_z_i=0.0f;
float alpha_x_temp=0.0f, alpha_y_temp=0.0f, alpha_z_temp=0.0f;
float alpha_min=0.0f, alpha_max=0.0f;
int minY_index=0, maxY_index=0, minZ_index=0, maxZ_index=0;
float sumWeight=0.0f, sumLength=0.0f;
float d_x1_x2=0.0f;
float inv_Detector_pixel = 1.0f/Detector_pixel_x;
// float weight = 1.0f;
// float tao;
// float tao_m1 = atan( (float(R)*Detector_pixel_x/2.0f-abs(Offset)) / DSO);
/***********************************************************/
if ( (Idx_voxel_x-(float(M)*0.5f-0.5)-M_Offset)*volumn_x*(Idx_voxel_x-(float(M)*0.5f-0.5)-M_Offset)*volumn_x
+ (Idx_voxel_y-(float(N)*0.5f-0.5))*volumn_y*(Idx_voxel_y-(float(N)*0.5f-0.5))*volumn_y
>= (float(M)*0.5f-0.5)*volumn_x*(float(N)*0.5f-0.5)*volumn_y )
d_volumn_kernel[image_voxel_index] = 0.0f ;
else
// Note: The following code applies to all the voxels simultaneously
{
coord_source_x = Source_x * cos_theta - Source_y * sin_theta;
coord_source_y = Source_x * sin_theta + Source_y * cos_theta;
if (FBCT)
coord_source_z = coord_voxel_z; // FBCT geometry, multiple sources
else
coord_source_z = Source_z; // CBCT geometry, single source
// coordinate of the source in (x,y,z) system after normal gantry rotation
/******** investigate the eight vertices of each voxel ********/
for (int k=0;k<2;k++)
for (int j=0;j<2;j++)
for (int i=0;i<2;i++)
{
//coordinate for each of eight vertices of the voxel
coord_vertex_x = coord_voxel_x + (i)*volumn_x - 0.5f*volumn_x;
coord_vertex_y = coord_voxel_y + (j)*volumn_y - 0.5f*volumn_y;
coord_vertex_z = coord_voxel_z + (k)*volumn_z - 0.5f*volumn_z;
// <t-s> <----> <x,y>
coord_vertex_t = coord_vertex_x * cos_theta + coord_vertex_y * sin_theta;
coord_vertex_s = - coord_vertex_x * sin_theta + coord_vertex_y * cos_theta;
// Note: Now rotate the image volume (by -t_theta degrees) instead of the normal gantry rotation
// In the new coordinate system, the detector plane stays fixed and is perpendicular to the t axis
// in <t,s> system
coord_vertexOnDetector_x = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_s - Source_y) + coord_vertex_s ;
if (FBCT)
coord_vertexOnDetector_z = coord_voxel_z ; //FBCT geometry, no magnification along z axis
else
coord_vertexOnDetector_z = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_z - Source_z) + coord_vertex_z ; // CBCT geometry
// the projection of the vertex of the voxel
minY= fmin(minY, coord_vertexOnDetector_x);
maxY= fmax(maxY, coord_vertexOnDetector_x);
minZ= fmin(minZ, coord_vertexOnDetector_z);
maxZ= fmax(maxZ, coord_vertexOnDetector_z);
// form a minimum bounding rectangle (MBR) for these vertices
}
minY_index = floor( (minY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
maxY_index = floor( (maxY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
minZ_index = floor( (minZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
maxZ_index = floor( (maxZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
// index of the pixels at the MBR boundaries on the detector
/***********************************/
// If this voxel does not project on this detector plane, it means there is no ray passing through this voxel at this angle.
if ( (minY_index<0) && (maxY_index <0) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minY_index>(R-1)) && (maxY_index >(R-1)) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minZ_index<0) && (maxZ_index <0 ) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minZ_index>(Z_prj-1)) && (maxZ_index >(Z_prj -1)) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else
// If this voxel projects on the detector plane
{
if (minY_index <=0)
minY_index = 0;
if (maxY_index >=(R-1) )
maxY_index = R-1;
if (minZ_index <=0)
minZ_index = 0;
if (maxZ_index >=(Z_prj-1) )
maxZ_index = Z_prj-1;
// for those projection pixels whose coordinates lie inside the MBR
// Each pixel corresponds to a ray, and that ray must pass through the specific voxel
for (int j=minZ_index; j<=maxZ_index; j++)
for (int i=minY_index; i<=maxY_index; i++)
{
coord_pixelOnDetector_x = DOD * cos_theta - (Detector_Ymin + i*Detector_pixel_x) * sin_theta ;
coord_pixelOnDetector_y = DOD * sin_theta + (Detector_Ymin + i*Detector_pixel_x) * cos_theta ;
coord_pixelOnDetector_z = Detector_Zmin + j*Detector_pixel_x;
// coordinate of the detector pixel inside MBR in (x,y,z) system after normal gantry rotation
/** Weighted Update for Half Detector **/
// if ( (float(i)*Detector_pixel_x) < 2.0f*abs(Offset) )
// weight = 1.0f;
// else
// {
// tao = atan( ( float(R/2-i)*Detector_pixel_x + abs(Offset) ) / DSO);
// weight = cos(PI/4*(tao/tao_m1 - 1));
// weight = weight * weight;
// }
/******/
// Next: investigate the line starting at x1 and ending at x2
// find out all the rays whose projection lies in the rectangle.
if ( (coord_source_x == coord_pixelOnDetector_x) || (coord_source_y == coord_pixelOnDetector_y) )
// Otherwise you should slightly rotate the angle to avoid these situations
{
// assert(0);
sumWeight = 0.0f;
}
else // if ( (coord_source_x != coord_pixelOnDetector_x) && (coord_source_y != coord_pixelOnDetector_y) )
{
alpha_x_i_1 = ( (coord_voxel_x - 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_x_i = ( (coord_voxel_x + 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_y_i_1 = ( (coord_voxel_y - 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_y_i = ( (coord_voxel_y + 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_z_i_1 = ( (coord_voxel_z - 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
alpha_z_i = ( (coord_voxel_z + 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
// find out the indices of the two closest x planes near this specific voxel
alpha_x_temp = fmin((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmin((alpha_y_i_1), (alpha_y_i));
if (fabs(coord_source_z - coord_pixelOnDetector_z) < volumn_z*1e-6)
alpha_z_temp = -MAX_infi;
else
alpha_z_temp = fmin((alpha_z_i_1), (alpha_z_i));
alpha_min = fmax(fmax(alpha_x_temp, alpha_y_temp), fmax(alpha_y_temp, alpha_z_temp));
// alpha_min is the entry point for one specific voxel
alpha_x_temp = fmax((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmax((alpha_y_i_1), (alpha_y_i));
if (fabs(coord_source_z - coord_pixelOnDetector_z) < volumn_z*1e-6)
alpha_z_temp = MAX_infi;
else
alpha_z_temp = fmax((alpha_z_i_1), (alpha_z_i));
alpha_max = fmin(fmin(alpha_x_temp, alpha_y_temp), fmin(alpha_y_temp, alpha_z_temp));
// alpha_max is the exit point of the line passing through this voxel
if (alpha_max-alpha_min>0) // if the value is negative, it means the ray does not pass through this voxel
{
d_x1_x2 = sqrt((coord_source_x-coord_pixelOnDetector_x)*(coord_source_x-coord_pixelOnDetector_x) + (coord_source_y-coord_pixelOnDetector_y)*(coord_source_y - coord_pixelOnDetector_y) + (coord_source_z-coord_pixelOnDetector_z)*(coord_source_z-coord_pixelOnDetector_z) );
float temp = d_x1_x2*(alpha_max-alpha_min);
if ( temp > volumn_x*1e-6)
// the line passes through the voxel with a sufficient length;
{
sumWeight = sumWeight + temp*d_proj_correction[j*R + i];
// Note: d_proj_correction[j*R + i] is c(i) which has been previously calculated
// Note: d_x1_x2 * (alpha_max - alpha_min) is w(i) for ray i of this projection
sumLength = sumLength + temp;
}
}
}
}// end for loop: all the rays whose projection fits in the rectangle
if (sumLength < volumn_x*1e-6)
d_volumn_kernel[image_voxel_index] += 0.0f ;
else
{
if (command==0)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight ; // matched adjoint operator, for test use
else if (command==1)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight/sumLength ;
}
}//end else if this voxel projects on this detector plane
}//end else if the reconstruction region is in the circle
// __syncthreads();
}
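/* Illustrative usage sketch (added comment, not part of the original source):
 * this back-projection kernel assigns one thread per voxel; threadIdx.x/blockIdx.x cover
 * the x dimension (M), blockIdx.y the y dimension (N) and blockIdx.z the z dimension (ZETA).
 * Assuming M is a multiple of the block width, a possible launch is:
 *
 *   dim3 block(128, 1, 1);
 *   dim3 grid(M / 128, N, ZETA);
 *   hipLaunchKernelGGL(backprj_ray_driven_3d_kernel, grid, block, 0, 0,
 *                      d_volumn_kernel, d_proj_correction, beta_temp, sin_theta, cos_theta, command);
 *
 * The block width and divisibility assumption are illustrative only.
 */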
__global__ void reduce_norm_2_kernel_l1(float *g_idata, float *g_odata, unsigned int n)
{
//load shared_mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? (g_idata[i]*g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.y*gridDim.x + blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_tv_kernel_l1(float *g_idata, float *g_odata, unsigned int n)
{
//load shared_mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? (g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.y*gridDim.x + blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_2_kernel_l2(float *g_idata, float *g_odata, unsigned int n)
{
//load shared mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? fabs(g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_2_kernel_end(float *g_idata, float *g_odata, unsigned int n)
{
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
sdata[tid] = (tid < n) ? fabs(g_idata[tid]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[0] = sqrt(sdata[0]);
}
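/* Note (added, not part of the original source): the reduction kernels above are meant to be
 * chained. A two-stage L2-norm evaluation first runs reduce_norm_2_kernel_l1 (per-block sums
 * of squares), then reduce_norm_2_kernel_end on the per-block partial sums, which adds them
 * and applies the final sqrt. Sketch, with illustrative buffer names and assuming the number
 * of partial sums fits into a single block of the final kernel:
 *
 *   hipLaunchKernelGGL(reduce_norm_2_kernel_l1, dim3(blocksX, blocksY), dim3(blockSize),
 *                      blockSize * sizeof(float), 0, d_in, d_partial, n);
 *   hipLaunchKernelGGL(reduce_norm_2_kernel_end, dim3(1), dim3(blockSize),
 *                      blockSize * sizeof(float), 0, d_partial, d_norm, blocksX * blocksY);
 *
 * The dynamic shared-memory size must be blockDim.x * sizeof(float), since the kernels
 * declare extern __shared__ float sdata[].
 */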
__global__ void tv_gradient_matrix_3d_kernel(float *df, float *d_volumn, float epi)
{
int t_id, bx_id, by_id;
t_id = threadIdx.x+1;
bx_id = blockIdx.x+1;
by_id = blockIdx.y+1;
float stl, s_sub_1_tl, s_t_sub_1_l, st_l_sub_1;
float s_add_1_tl, s_add_1_t_sub_1_l, s_add_1_t_l_sub_1;
float s_t_add_1_l, s_sub_1_t_add_1_l, s_t_add_1_l_sub_1;
float st_l_add_1, s_sub_1_t_l_add_1, s_t_sub_1_l_add_1;
stl = d_volumn[by_id*N*M + bx_id*M + t_id];
s_sub_1_tl = d_volumn[(by_id-1)*N*M + bx_id*M + t_id];
s_t_sub_1_l = d_volumn[by_id*N*M + (bx_id-1)*M + t_id];
st_l_sub_1 = d_volumn[by_id*N*M + bx_id*M + t_id-1];
s_add_1_tl = d_volumn[(by_id+1)*N*M + bx_id*M + t_id];
s_add_1_t_sub_1_l = d_volumn[(by_id+1)*N*M + (bx_id-1)*M + t_id];
s_add_1_t_l_sub_1 = d_volumn[(by_id+1)*N*M + bx_id*M + t_id-1];
s_t_add_1_l = d_volumn[by_id*N*M + (bx_id+1)*M + t_id];
s_sub_1_t_add_1_l = d_volumn[(by_id-1)*N*M + (bx_id+1)*M + t_id];
s_t_add_1_l_sub_1 = d_volumn[by_id*N*M + (bx_id+1)*M + t_id-1];
st_l_add_1 =d_volumn[by_id*N*M + bx_id*M + t_id + 1];
s_sub_1_t_l_add_1 = d_volumn[(by_id-1)*N*M + bx_id*M + t_id + 1];
s_t_sub_1_l_add_1 = d_volumn[by_id*N*M + (bx_id-1)*M + t_id + 1];
df[by_id*N*M + bx_id*M + t_id] = ((stl - s_sub_1_tl) + (stl - s_t_sub_1_l) + (stl - st_l_sub_1) ) /sqrt(epi + (stl - s_sub_1_tl)* (stl - s_sub_1_tl) + (stl - s_t_sub_1_l)* (stl - s_t_sub_1_l) + (stl - st_l_sub_1)* (stl - st_l_sub_1) )
- (s_add_1_tl - stl)/sqrt(epi + (s_add_1_tl - stl)*(s_add_1_tl - stl) + (s_add_1_tl - s_add_1_t_sub_1_l)*(s_add_1_tl - s_add_1_t_sub_1_l) + (s_add_1_tl - s_add_1_t_l_sub_1)*(s_add_1_tl - s_add_1_t_l_sub_1))
- (s_t_add_1_l - stl)/sqrt(epi + (s_t_add_1_l - s_sub_1_t_add_1_l)*(s_t_add_1_l - s_sub_1_t_add_1_l) + (s_t_add_1_l - stl)*(s_t_add_1_l - stl) + (s_t_add_1_l - s_t_add_1_l_sub_1)* (s_t_add_1_l - s_t_add_1_l_sub_1))
- (st_l_add_1 - stl)/sqrt(epi + (st_l_add_1 - s_sub_1_t_l_add_1)*(st_l_add_1 - s_sub_1_t_l_add_1) + (st_l_add_1 - s_t_sub_1_l_add_1)*(st_l_add_1 - s_t_sub_1_l_add_1) + (st_l_add_1 - stl)* (st_l_add_1 - stl));
}
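/* Note (added, not part of the original source): the kernel above evaluates the gradient of
 * the smoothed isotropic total-variation term. With f_{s,t,l} the voxel values and epi a
 * small smoothing constant, the per-voxel summand is
 *
 *   sqrt( epi + (f_{s,t,l}-f_{s-1,t,l})^2 + (f_{s,t,l}-f_{s,t-1,l})^2 + (f_{s,t,l}-f_{s,t,l-1})^2 )
 *
 * and df[s,t,l] accumulates d(TV)/d(f_{s,t,l}): the derivative of the voxel's own summand plus
 * the three neighbouring summands in which f_{s,t,l} appears. tv_matrix_3d_kernel below
 * computes the corresponding per-voxel summand (without epi); reducing its output over the
 * volume yields the TV value.
 */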
__global__ void tv_matrix_3d_kernel(float *df, float *d_volumn)
{
int t_id, bx_id, by_id;
t_id = threadIdx.x+1;
bx_id = blockIdx.x+1;
by_id = blockIdx.y+1;
float stl, s_sub_1_tl, s_t_sub_1_l, st_l_sub_1;
stl = d_volumn[by_id*N*M + bx_id*M + t_id];
s_sub_1_tl = d_volumn[(by_id-1)*N*M + bx_id*M + t_id];
s_t_sub_1_l = d_volumn[by_id*N*M + (bx_id-1)*M + t_id];
st_l_sub_1 = d_volumn[by_id*N*M + bx_id*M + t_id-1];
df[by_id*N*M + bx_id*M + t_id] = sqrt( (stl - s_sub_1_tl)*(stl - s_sub_1_tl) + (stl - s_t_sub_1_l)*(stl - s_t_sub_1_l) + (stl - st_l_sub_1)*(stl - st_l_sub_1)) ;
}
__global__ void backtracking_update_kernel(float *d_volumn_f_update,float *d_volumn_f, float *d_tv_gradient_matrix ,float alpha_temp)
{
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
d_volumn_f_update[i] = d_volumn_f[i] - alpha_temp*d_tv_gradient_matrix[i];
}
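/* Note (added, not part of the original source): a minimal sketch of how this kernel is
 * typically used inside a backtracking line search on the TV objective (loop structure and
 * names are illustrative only):
 *
 *   float alpha_temp = alpha0;
 *   for (int iter = 0; iter < max_backtracks; ++iter) {
 *       hipLaunchKernelGGL(backtracking_update_kernel, grid, block, 0, 0,
 *                          d_volumn_f_update, d_volumn_f, d_tv_gradient_matrix, alpha_temp);
 *       // evaluate TV(d_volumn_f_update); stop if it decreased sufficiently, else shrink the step
 *       alpha_temp *= 0.5f;
 *   }
 */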
///************ GHF new Code **************///
| 9b9376e76bc2d8819099f243a21a5eb67463b82d.cu | /********
* This code integrate the functions performing both CBCT and multi-layer FBCT.
* The flag "FBCT" will automatically select the CBCT/FBCT branches.
*/
__global__ void forward_ray_driven_3d_kernel_correction(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
int Detector_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int Detector_z_idx = blockIdx.y;
int proj_pixel_index = Detector_z_idx * R + Detector_x_idx;
// Source position (X2): coordinate in (x,y,z) system .
float vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
float vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
float vertex_x2_z;
if (FBCT)
vertex_x2_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x; // FBCT geometry
else
vertex_x2_z = Source_z; // CBCT geometry
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x = DOD * cos_theta - (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * sin_theta;
float vertex_x1_y = DOD * sin_theta + (Detector_Ymin + Detector_x_idx * Detector_pixel_x) * cos_theta;
float vertex_x1_z = Detector_Zmin + Detector_z_idx * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
if ( (vertex_x1_x == vertex_x2_x) || (vertex_x1_y == vertex_x2_y) ) //Note: You may rotate the angle to avoid this happening
{
d_proj_correction[proj_pixel_index] = 0.0f ;
// printf("Vertical or Horizontal line occurs! Detector_x_idx:%d, Detector_z_idx:%d/n", Detector_x_idx,Detector_z_idx);
// assert(0);
}
else // if ( (vertex_x1_x != vertex_x2_x) && (vertex_x1_y != vertex_x2_y) )
{
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still unsure here which one is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
if (fabs(vertex_x2_z - vertex_x1_z) < volumn_z*1e-6) // in case x1 and x2 are at the same z position
{
alpha_min = -MAX_infi;
alpha_max = MAX_infi;
// printf("Same horizontal plane occurs! Detector_z_idx:%d/n", Detector_z_idx);
// assert(0);
}
else
{
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
}
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
d_proj_correction[proj_pixel_index] = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x > vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y > vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (fabs(vertex_x1_z-vertex_x2_z) < volumn_z*1e-6)
voxel_k = k_min-1;
else if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i, j or k would not fall exactly on a plane, its predecessor (along the ray) is taken as the plane index
while (alpha_max - alpha_c > 1e-16)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end while
if (one_ray_length < volumn_z*1e-6)
d_proj_correction[proj_pixel_index] = 0.0;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length;
// projection correction (for SART)
}
}//else if
}//else if
// __syncthreads();
}
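/* Illustrative usage sketch (added comment, not part of the original source): one thread per
 * detector pixel, grid covering (R / blockWidth, Z_prj), assuming R is divisible by the
 * block width:
 *
 *   dim3 block(128, 1);
 *   dim3 grid(R / 128, Z_prj);
 *   forward_ray_driven_3d_kernel_correction<<<grid, block>>>(
 *       d_f, d_proj_correction, d_proj_data, sin_theta, cos_theta, command);
 */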
__global__ void backprj_ray_driven_3d_kernel(float *d_volumn_kernel, float *d_proj_correction, float beta_temp, float sin_theta, float cos_theta, int command)
{
/*
* Reference: "Accelerating simultaneous algebraic reconstruction technique with motion compensation using CUDA-enabled GPU"
* Wai-Man Pang, CUHK
* Section: Back-projection and image update
* d_proj_correction : 2D projection correction, i.e. c(i) in the Wai-Man Pang, CUHK paper
* t_theta : projection angle
* beta_temp : lamda in the paper
* d_volumn: 3D object array
* d_volumn(j) = d_volumn(j) + beta_temp * sum_i (c(i)*w(ij)) / sum_i (w(ij)); where i is ray index, j is voxel index
*/
int Idx_voxel_x = threadIdx.x + blockIdx.x * blockDim.x;
int Idx_voxel_y = blockIdx.y;
int Idx_voxel_z = blockIdx.z;
int image_voxel_index = M * N * Idx_voxel_z + M * Idx_voxel_y + Idx_voxel_x;
//coordinate of center of each voxel in x-y-z system
float coord_voxel_x = boundary_voxel_x + volumn_x*0.5f + Idx_voxel_x * volumn_x;
float coord_voxel_y = boundary_voxel_y + volumn_y*0.5f + Idx_voxel_y * volumn_y;
float coord_voxel_z = boundary_voxel_z + volumn_z*0.5f + Idx_voxel_z * volumn_z;
/**************************************/
float coord_vertex_x=0.0f, coord_vertex_y=0.0f, coord_vertex_z=0.0f;
float coord_vertex_s=0.0f, coord_vertex_t=0.0f;
float coord_vertexOnDetector_x=0.0f, coord_vertexOnDetector_z=0.0f;
float minY = MAX_infi, minZ=MAX_infi, maxY=-MAX_infi, maxZ=-MAX_infi;
float coord_pixelOnDetector_x=0.0f, coord_pixelOnDetector_y=0.0f, coord_pixelOnDetector_z=0.0f;
float coord_source_x=0.0f, coord_source_y=0.0f, coord_source_z=0.0f;
float alpha_x_i_1=0.0f, alpha_x_i=0.0f;
float alpha_y_i_1=0.0f, alpha_y_i=0.0f;
float alpha_z_i_1=0.0f, alpha_z_i=0.0f;
float alpha_x_temp=0.0f, alpha_y_temp=0.0f, alpha_z_temp=0.0f;
float alpha_min=0.0f, alpha_max=0.0f;
int minY_index=0, maxY_index=0, minZ_index=0, maxZ_index=0;
float sumWeight=0.0f, sumLength=0.0f;
float d_x1_x2=0.0f;
float inv_Detector_pixel = 1.0f/Detector_pixel_x;
// float weight = 1.0f;
// float tao;
// float tao_m1 = atan( (float(R)*Detector_pixel_x/2.0f-abs(Offset)) / DSO);
/***********************************************************/
if ( (Idx_voxel_x-(float(M)*0.5f-0.5)-M_Offset)*volumn_x*(Idx_voxel_x-(float(M)*0.5f-0.5)-M_Offset)*volumn_x
+ (Idx_voxel_y-(float(N)*0.5f-0.5))*volumn_y*(Idx_voxel_y-(float(N)*0.5f-0.5))*volumn_y
>= (float(M)*0.5f-0.5)*volumn_x*(float(N)*0.5f-0.5)*volumn_y )
d_volumn_kernel[image_voxel_index] = 0.0f ;
else
// Note: The following code applies to all the voxels simultaneously
{
coord_source_x = Source_x * cos_theta - Source_y * sin_theta;
coord_source_y = Source_x * sin_theta + Source_y * cos_theta;
if (FBCT)
coord_source_z = coord_voxel_z; // FBCT geometry, multiple sources
else
coord_source_z = Source_z; // CBCT geometry, single source
// coordinate of the source in (x,y,z) system after normal gantry rotation
/******** investigate the eight vertices of each voxel ********/
for (int k=0;k<2;k++)
for (int j=0;j<2;j++)
for (int i=0;i<2;i++)
{
//coordinate for each of eight vertices of the voxel
coord_vertex_x = coord_voxel_x + (i)*volumn_x - 0.5f*volumn_x;
coord_vertex_y = coord_voxel_y + (j)*volumn_y - 0.5f*volumn_y;
coord_vertex_z = coord_voxel_z + (k)*volumn_z - 0.5f*volumn_z;
// <t-s> <----> <x,y>
coord_vertex_t = coord_vertex_x * cos_theta + coord_vertex_y * sin_theta;
coord_vertex_s = - coord_vertex_x * sin_theta + coord_vertex_y * cos_theta;
// Note: Now rotate the image volume (by -t_theta degrees) instead of the normal gantry rotation
// In the new coordinate system, the detector plane stays fixed and is perpendicular to the t axis
// in <t,s> system
coord_vertexOnDetector_x = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_s - Source_y) + coord_vertex_s ;
if (FBCT)
coord_vertexOnDetector_z = coord_voxel_z ; //FBCT geometry, no magnification along z axis
else
coord_vertexOnDetector_z = (coord_vertex_t - DOD) / (DSO- coord_vertex_t) * (coord_vertex_z - Source_z) + coord_vertex_z ; // CBCT geometry
// the projection of the vertex of the voxel
minY= fmin(minY, coord_vertexOnDetector_x);
maxY= fmax(maxY, coord_vertexOnDetector_x);
minZ= fmin(minZ, coord_vertexOnDetector_z);
maxZ= fmax(maxZ, coord_vertexOnDetector_z);
// form a minimum bounding rectangle (MBR) for these vertices
}
minY_index = floor( (minY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
maxY_index = floor( (maxY - Detector_Ymin ) * inv_Detector_pixel +0.5f);
minZ_index = floor( (minZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
maxZ_index = floor( (maxZ - Detector_Zmin ) * inv_Detector_pixel +0.5f);
// index of the pixels at the MBR boundaries on the detector
/***********************************/
// If this voxel does not project on this detector plane, it means there is no ray passing through this voxel at this angle.
if ( (minY_index<0) && (maxY_index <0) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minY_index>(R-1)) && (maxY_index >(R-1)) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minZ_index<0) && (maxZ_index <0 ) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else if ( (minZ_index>(Z_prj-1)) && (maxZ_index >(Z_prj -1)) )
{
d_volumn_kernel[image_voxel_index] += 0.0f ;
}
else
// If this voxel projects on the detector plane
{
if (minY_index <=0)
minY_index = 0;
if (maxY_index >=(R-1) )
maxY_index = R-1;
if (minZ_index <=0)
minZ_index = 0;
if (maxZ_index >=(Z_prj-1) )
maxZ_index = Z_prj-1;
// for those projection pixels whose coordinates lie inside the MBR
// Each pixel corresponds to a ray, and that ray must pass through the specific voxel
for (int j=minZ_index; j<=maxZ_index; j++)
for (int i=minY_index; i<=maxY_index; i++)
{
coord_pixelOnDetector_x = DOD * cos_theta - (Detector_Ymin + i*Detector_pixel_x) * sin_theta ;
coord_pixelOnDetector_y = DOD * sin_theta + (Detector_Ymin + i*Detector_pixel_x) * cos_theta ;
coord_pixelOnDetector_z = Detector_Zmin + j*Detector_pixel_x;
// coordinate of the detector pixel inside MBR in (x,y,z) system after normal gantry rotation
/** Weighted Update for Half Detector **/
// if ( (float(i)*Detector_pixel_x) < 2.0f*abs(Offset) )
// weight = 1.0f;
// else
// {
// tao = atan( ( float(R/2-i)*Detector_pixel_x + abs(Offset) ) / DSO);
// weight = cos(PI/4*(tao/tao_m1 - 1));
// weight = weight * weight;
// }
/******/
// Next: investigate the line starting at x1 and ending at x2
// find out all the rays whose projection lies in the rectangle.
if ( (coord_source_x == coord_pixelOnDetector_x) || (coord_source_y == coord_pixelOnDetector_y) )
// Otherwise you should slightly rotate the angle to avoid these situations
{
// assert(0);
sumWeight = 0.0f;
}
else // if ( (coord_source_x != coord_pixelOnDetector_x) && (coord_source_y != coord_pixelOnDetector_y) )
{
alpha_x_i_1 = ( (coord_voxel_x - 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_x_i = ( (coord_voxel_x + 0.5f*volumn_x) - coord_pixelOnDetector_x )/( coord_source_x - coord_pixelOnDetector_x );
alpha_y_i_1 = ( (coord_voxel_y - 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_y_i = ( (coord_voxel_y + 0.5f*volumn_y) - coord_pixelOnDetector_y )/( coord_source_y - coord_pixelOnDetector_y );
alpha_z_i_1 = ( (coord_voxel_z - 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
alpha_z_i = ( (coord_voxel_z + 0.5f*volumn_z) - coord_pixelOnDetector_z )/( coord_source_z - coord_pixelOnDetector_z );
// find out the indices of the two closest x planes near this specific voxel
alpha_x_temp = fmin((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmin((alpha_y_i_1), (alpha_y_i));
if (fabs(coord_source_z - coord_pixelOnDetector_z) < volumn_z*1e-6)
alpha_z_temp = -MAX_infi;
else
alpha_z_temp = fmin((alpha_z_i_1), (alpha_z_i));
alpha_min = fmax(fmax(alpha_x_temp, alpha_y_temp), fmax(alpha_y_temp, alpha_z_temp));
// alpha_min is the entry point for one specific voxel
alpha_x_temp = fmax((alpha_x_i_1), (alpha_x_i));
alpha_y_temp = fmax((alpha_y_i_1), (alpha_y_i));
if (fabs(coord_source_z - coord_pixelOnDetector_z) < volumn_z*1e-6)
alpha_z_temp = MAX_infi;
else
alpha_z_temp = fmax((alpha_z_i_1), (alpha_z_i));
alpha_max = fmin(fmin(alpha_x_temp, alpha_y_temp), fmin(alpha_y_temp, alpha_z_temp));
// alpha_max is the exit point of the line passing through this voxel
if (alpha_max-alpha_min>0) // if the value is negative, it means the ray does not pass through this voxel
{
d_x1_x2 = sqrt((coord_source_x-coord_pixelOnDetector_x)*(coord_source_x-coord_pixelOnDetector_x) + (coord_source_y-coord_pixelOnDetector_y)*(coord_source_y - coord_pixelOnDetector_y) + (coord_source_z-coord_pixelOnDetector_z)*(coord_source_z-coord_pixelOnDetector_z) );
float temp = d_x1_x2*(alpha_max-alpha_min);
if ( temp > volumn_x*1e-6)
// the line passes through the voxel with a sufficient length;
{
sumWeight = sumWeight + temp*d_proj_correction[j*R + i];
// Note: d_proj_correction[j*R + i] is c(i) which has been previously calculated
// Note: d_x1_x2 * (alpha_max - alpha_min) is w(i) for ray i of this projection
sumLength = sumLength + temp;
}
}
}
}// end for loop: all the rays whose projection fits in the rectangle
if (sumLength < volumn_x*1e-6)
d_volumn_kernel[image_voxel_index] += 0.0f ;
else
{
if (command==0)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight ; // matched adjoint operator, for test use
else if (command==1)
d_volumn_kernel[image_voxel_index] += beta_temp * sumWeight/sumLength ;
}
}//end else if this voxel projects on this detector plane
}//end else if the reconstruction region is in the circle
// __syncthreads();
}
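/* Illustrative usage sketch (added comment, not part of the original source): one thread per
 * voxel, grid covering (M / blockWidth, N, ZETA), assuming M is divisible by the block width:
 *
 *   dim3 block(128, 1, 1);
 *   dim3 grid(M / 128, N, ZETA);
 *   backprj_ray_driven_3d_kernel<<<grid, block>>>(
 *       d_volumn_kernel, d_proj_correction, beta_temp, sin_theta, cos_theta, command);
 */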
__global__ void reduce_norm_2_kernel_l1(float *g_idata, float *g_odata, unsigned int n)
{
//load shared_mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? (g_idata[i]*g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.y*gridDim.x + blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_tv_kernel_l1(float *g_idata, float *g_odata, unsigned int n)
{
//load shared_mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? (g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.y*gridDim.x + blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_2_kernel_l2(float *g_idata, float *g_odata, unsigned int n)
{
//load shared mem
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? fabs(g_idata[i]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void reduce_norm_2_kernel_end(float *g_idata, float *g_odata, unsigned int n)
{
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
sdata[tid] = (tid < n) ? fabs(g_idata[tid]) : 0;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[0] = sqrt(sdata[0]);
}
__global__ void tv_gradient_matrix_3d_kernel(float *df, float *d_volumn, float epi)
{
int t_id, bx_id, by_id;
t_id = threadIdx.x+1;
bx_id = blockIdx.x+1;
by_id = blockIdx.y+1;
float stl, s_sub_1_tl, s_t_sub_1_l, st_l_sub_1;
float s_add_1_tl, s_add_1_t_sub_1_l, s_add_1_t_l_sub_1;
float s_t_add_1_l, s_sub_1_t_add_1_l, s_t_add_1_l_sub_1;
float st_l_add_1, s_sub_1_t_l_add_1, s_t_sub_1_l_add_1;
stl = d_volumn[by_id*N*M + bx_id*M + t_id];
s_sub_1_tl = d_volumn[(by_id-1)*N*M + bx_id*M + t_id];
s_t_sub_1_l = d_volumn[by_id*N*M + (bx_id-1)*M + t_id];
st_l_sub_1 = d_volumn[by_id*N*M + bx_id*M + t_id-1];
s_add_1_tl = d_volumn[(by_id+1)*N*M + bx_id*M + t_id];
s_add_1_t_sub_1_l = d_volumn[(by_id+1)*N*M + (bx_id-1)*M + t_id];
s_add_1_t_l_sub_1 = d_volumn[(by_id+1)*N*M + bx_id*M + t_id-1];
s_t_add_1_l = d_volumn[by_id*N*M + (bx_id+1)*M + t_id];
s_sub_1_t_add_1_l = d_volumn[(by_id-1)*N*M + (bx_id+1)*M + t_id];
s_t_add_1_l_sub_1 = d_volumn[by_id*N*M + (bx_id+1)*M + t_id-1];
st_l_add_1 =d_volumn[by_id*N*M + bx_id*M + t_id + 1];
s_sub_1_t_l_add_1 = d_volumn[(by_id-1)*N*M + bx_id*M + t_id + 1];
s_t_sub_1_l_add_1 = d_volumn[by_id*N*M + (bx_id-1)*M + t_id + 1];
df[by_id*N*M + bx_id*M + t_id] = ((stl - s_sub_1_tl) + (stl - s_t_sub_1_l) + (stl - st_l_sub_1) ) /sqrt(epi + (stl - s_sub_1_tl)* (stl - s_sub_1_tl) + (stl - s_t_sub_1_l)* (stl - s_t_sub_1_l) + (stl - st_l_sub_1)* (stl - st_l_sub_1) )
- (s_add_1_tl - stl)/sqrt(epi + (s_add_1_tl - stl)*(s_add_1_tl - stl) + (s_add_1_tl - s_add_1_t_sub_1_l)*(s_add_1_tl - s_add_1_t_sub_1_l) + (s_add_1_tl - s_add_1_t_l_sub_1)*(s_add_1_tl - s_add_1_t_l_sub_1))
- (s_t_add_1_l - stl)/sqrt(epi + (s_t_add_1_l - s_sub_1_t_add_1_l)*(s_t_add_1_l - s_sub_1_t_add_1_l) + (s_t_add_1_l - stl)*(s_t_add_1_l - stl) + (s_t_add_1_l - s_t_add_1_l_sub_1)* (s_t_add_1_l - s_t_add_1_l_sub_1))
- (st_l_add_1 - stl)/sqrt(epi + (st_l_add_1 - s_sub_1_t_l_add_1)*(st_l_add_1 - s_sub_1_t_l_add_1) + (st_l_add_1 - s_t_sub_1_l_add_1)*(st_l_add_1 - s_t_sub_1_l_add_1) + (st_l_add_1 - stl)* (st_l_add_1 - stl));
}
__global__ void tv_matrix_3d_kernel(float *df, float *d_volumn)
{
int t_id, bx_id, by_id;
t_id = threadIdx.x+1;
bx_id = blockIdx.x+1;
by_id = blockIdx.y+1;
float stl, s_sub_1_tl, s_t_sub_1_l, st_l_sub_1;
stl = d_volumn[by_id*N*M + bx_id*M + t_id];
s_sub_1_tl = d_volumn[(by_id-1)*N*M + bx_id*M + t_id];
s_t_sub_1_l = d_volumn[by_id*N*M + (bx_id-1)*M + t_id];
st_l_sub_1 = d_volumn[by_id*N*M + bx_id*M + t_id-1];
df[by_id*N*M + bx_id*M + t_id] = sqrt( (stl - s_sub_1_tl)*(stl - s_sub_1_tl) + (stl - s_t_sub_1_l)*(stl - s_t_sub_1_l) + (stl - st_l_sub_1)*(stl - st_l_sub_1)) ;
}
__global__ void backtracking_update_kernel(float *d_volumn_f_update,float *d_volumn_f, float *d_tv_gradient_matrix ,float alpha_temp)
{
unsigned int i = blockIdx.y* blockDim.x*gridDim.x + blockIdx.x*blockDim.x + threadIdx.x;
d_volumn_f_update[i] = d_volumn_f[i] - alpha_temp*d_tv_gradient_matrix[i];
}
///************ GHF new Code **************///
|
fa12277945fe97a7c2e1201b7b12390aaf0df0cc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
int main(int argc, char *argv[]) {
int iDev = 0;
hipDeviceProp_t iProp;
hipGetDeviceProperties(&iProp, iDev);
printf("Device %d: %s\n", iDev, iProp.name);
printf("Number of multiprocessors: %d\n", iProp.multiProcessorCount);
printf("Total amount of constant memory: %4.2f KB\n",
iProp.totalConstMem / 1024.0);
printf("Total amount of shared memory per block: %4.2f KB\n",
iProp.sharedMemPerBlock / 1024.0);
printf("Total number of registers available per block: %d\n",
iProp.regsPerBlock);
printf("Warp size%d\n", iProp.warpSize);
printf("Maximum number of threads per block: %d\n", iProp.maxThreadsPerBlock);
printf("Maximum number of threads per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor);
printf("Maximum number of warps per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor / 32);
return EXIT_SUCCESS;
}
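/* Note (added, not part of the original source): the program above queries device 0 only.
 * To enumerate every device, the same calls can be wrapped in a loop (illustrative sketch):
 *
 *   int deviceCount = 0;
 *   hipGetDeviceCount(&deviceCount);
 *   for (int dev = 0; dev < deviceCount; ++dev) {
 *       hipGetDeviceProperties(&iProp, dev);
 *       // print the same fields as above
 *   }
 */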
| fa12277945fe97a7c2e1201b7b12390aaf0df0cc.cu | #include <stdio.h>
#include <cuda_runtime.h>
int main(int argc, char *argv[]) {
int iDev = 0;
cudaDeviceProp iProp;
cudaGetDeviceProperties(&iProp, iDev);
printf("Device %d: %s\n", iDev, iProp.name);
printf("Number of multiprocessors: %d\n", iProp.multiProcessorCount);
printf("Total amount of constant memory: %4.2f KB\n",
iProp.totalConstMem / 1024.0);
printf("Total amount of shared memory per block: %4.2f KB\n",
iProp.sharedMemPerBlock / 1024.0);
printf("Total number of registers available per block: %d\n",
iProp.regsPerBlock);
printf("Warp size%d\n", iProp.warpSize);
printf("Maximum number of threads per block: %d\n", iProp.maxThreadsPerBlock);
printf("Maximum number of threads per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor);
printf("Maximum number of warps per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor / 32);
return EXIT_SUCCESS;
}
|
e1a0630de4b91b727c7aac2c4807caf7d08321c6.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SparseLinear.cu"
#else
static bool THNN_(checkInput)(THCTensor* t)
{
return !t->is_empty() && t->_dim() == 2 && t->size[1] == 3;
}
static bool THNN_(checkSize2D)(THCTensor* t, int64_t size0, int64_t size1)
{
return !t->is_empty() && t->_dim() == 2 && t->size[0] == size0 && t->size[1] == size1;
}
static bool THNN_(checkSize1D)(THCTensor* t, int64_t size0)
{
return !t->is_empty() && t->_dim() == 1 && t->size[0] == size0;
}
static inline void THNN_(copyCudaFloatingType)(THCState *state, THCudaIntTensor *buf, THCTensor *t) {
#ifdef THC_REAL_IS_FLOAT
THCudaIntTensor_copyCudaFloat(state, buf, t);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaIntTensor_copyCudaDouble(state, buf, t);
#elif defined(THC_REAL_IS_HALF)
THCudaIntTensor_copyCudaHalf(state, buf, t);
#endif
}
void THNN_(SparseLinear_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias)
{
THAssert(THCTensor_(checkGPU)(state, 4, input, output, weight, bias));
int64_t h;
int64_t outDim = THCTensor_(size)(state, weight, 0);
int64_t inDim = THCTensor_(size)(state, weight, 1);
THArgCheck(THNN_(checkInput)(input), 2, "input size must be nnz x 3");
AT_CHECK(!output->is_empty() && THCTensor_(nDimension)(state, output) == 2,
"output must be batchsize x outputsize, got size: ", output->sizes());
THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");
weight = THCTensor_(newContiguous)(state, weight);
int64_t batchnum = THCTensor_(size)(state, output, 0);
int64_t nnz = THCTensor_(size)(state, input, 0);
THCTensor *buffer = THCTensor_(new)(state);
THCTensor *sel = THCTensor_(new)(state);
THCTensor *values = THCTensor_(new)(state);
THCudaIntTensor *rowbuf = THCudaIntTensor_new(state);
THCudaIntTensor *csrPtrs = THCudaIntTensor_new(state);
THCudaIntTensor *colInds = THCudaIntTensor_new(state);
THCTensor_(resize1d)(state, values, nnz);
THCudaIntTensor_resize1d(state, rowbuf, nnz);
THCudaIntTensor_resize1d(state, colInds, nnz);
THCudaIntTensor_resize1d(state, csrPtrs, batchnum+1);
// Get data ready for cusparse, need CudaInt buffers
// We do not need to sort, since rows are already in order
// If rows might get out of order in future implementations, or if cusparse
// complains with an illegal memory access, sort like we do in AccGradParameters
THCTensor_(select)(state, sel, input, 1, 0);
THNN_(copyCudaFloatingType)(state, rowbuf, sel);
THCTensor_(select)(state, sel, input, 1, 1);
THNN_(copyCudaFloatingType)(state, colInds, sel);
THCTensor_(select)(state, sel, input, 1, 2);
THCTensor_(copyCuda)(state, values, sel);
init_cusparse();
hipsparseXcoo2csr(cusparse_handle,
THCudaIntTensor_data(state, rowbuf), nnz, batchnum,
THCudaIntTensor_data(state, csrPtrs), HIPSPARSE_INDEX_BASE_ONE);
// output = bias
THCTensor_(resize2d)(state, buffer, outDim, batchnum);
THCTensor_(zero)(state, buffer);
for (h=0; h<batchnum; h++) {
THCTensor_(select)(state, sel, buffer, 1, h);
THCTensor_(copy)(state, sel, bias);
}
// output = W * x
real one = ScalarConvert<int, real>::to(1);
hipsparseMatDescr_t descr = 0;
hipsparseCreateMatDescr(&descr);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ONE);
#ifdef THC_REAL_IS_FLOAT
hipsparseScsrmm(cusparse_handle,
#elif defined(THC_REAL_IS_DOUBLE)
hipsparseDcsrmm(cusparse_handle,
#endif
HIPSPARSE_OPERATION_NON_TRANSPOSE,
batchnum, outDim, inDim, nnz,
&one,
descr,
THCTensor_(data)(state, values),
THCudaIntTensor_data(state, csrPtrs),
THCudaIntTensor_data(state, colInds),
THCTensor_(data)(state, weight), inDim,
&one, THCTensor_(data)(state, buffer), batchnum
);
THCTensor_(transpose)(state, buffer, NULL, 0, 1);
// We do work in the buffer to keep the output contiguous
THCTensor_(copy)(state, output, buffer);
hipsparseDestroyMatDescr(descr);
descr = 0;
THCTensor_(free)(state, buffer);
THCTensor_(free)(state, sel);
THCTensor_(free)(state, values);
THCTensor_(free)(state, weight);
THCudaIntTensor_free(state, rowbuf);
THCudaIntTensor_free(state, colInds);
THCudaIntTensor_free(state, csrPtrs);
}
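/* Note on the data layout consumed above: the input is expected to be a dense nnz x 3
   tensor of COO triplets, one row per nonzero — column 0 the batch row, column 1 the
   feature column, column 2 the value — with indices apparently one-based, matching the
   HIPSPARSE_INDEX_BASE_ONE setting used for the coo2csr conversion and the csrmm call. */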
void THNN_(SparseLinear_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *weight,
THCTensor *bias,
accreal weightDecay,
accreal scale)
{
int64_t outDim = THCTensor_(size)(state, weight, 0);
int64_t inDim = THCTensor_(size)(state, weight, 1);
  THArgCheck(THNN_(checkInput)(input), 2, "input size must be nnz x 3");
THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, "gradWeight size wrong");
THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
weight = THCTensor_(newContiguous)(state, weight);
int64_t nnz = THCTensor_(size)(state, input, 0);
int64_t batchnum = THCTensor_(size)(state, gradOutput, 0);
THCTensor *buf = THCTensor_(new)(state);
THCTensor *cols = THCTensor_(new)(state);
THCTensor *sel = THCTensor_(new)(state);
THCudaLongTensor *inds = THCudaLongTensor_new(state);
THCTensor *values = THCTensor_(new)(state);
THCudaIntTensor *colbuf = THCudaIntTensor_new(state);
THCudaIntTensor *colPtrs = THCudaIntTensor_new(state);
THCudaIntTensor *rowInds = THCudaIntTensor_new(state);
THCTensor_(select)(state, sel, input, 1, 0); // rowInds
THCTensor_(select)(state, cols, input, 1, 1); // colInds
  THCTensor_(cadd)(state, buf, sel, batchnum, cols); // colInds * batchnum + rowInds
THCTensor_(sort)(state, buf, inds, buf, 0, 0); // Indices are now in ind
THCTensor_(indexSelect)(state, buf, input, 0, inds);
THCTensor_(resize1d)(state, values, nnz);
THCudaIntTensor_resize1d(state, colbuf, nnz);
THCudaIntTensor_resize1d(state, rowInds, nnz);
THCudaIntTensor_resize1d(state, colPtrs, inDim+1);
// Get data ready for cusparse, need CudaInt buffers
THCTensor_(select)(state, sel, buf, 1, 0);
THNN_(copyCudaFloatingType)(state, rowInds, sel);
THCTensor_(select)(state, sel, buf, 1, 1);
THNN_(copyCudaFloatingType)(state, colbuf, sel);
THCTensor_(select)(state, sel, buf, 1, 2);
THCTensor_(copyCuda)(state, values, sel);
init_cusparse();
  // Secretly coo2csc: running coo2csr on the column indices yields the CSC column pointers
hipsparseXcoo2csr(cusparse_handle,
THCudaIntTensor_data(state, colbuf), nnz, inDim,
THCudaIntTensor_data(state, colPtrs), HIPSPARSE_INDEX_BASE_ONE);
  // FORTRAN expects contiguous col-major matrices
THCTensor *tgradOutput = THCTensor_(new)(state);
THCTensor_(transpose)(state, tgradOutput, gradOutput, 0, 1);
THCTensor_(resize2d)(state, buf, batchnum, outDim);
THCTensor_(copy)(state, buf, tgradOutput);
THCTensor_(free)(state, tgradOutput);
real one = ScalarConvert<int, real>::to(1);
hipsparseMatDescr_t descr = 0;
hipsparseCreateMatDescr(&descr);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ONE);
#ifdef THC_REAL_IS_FLOAT
hipsparseScsrmm(cusparse_handle,
#elif defined(THC_REAL_IS_DOUBLE)
hipsparseDcsrmm(cusparse_handle,
#endif
HIPSPARSE_OPERATION_NON_TRANSPOSE,
inDim, outDim, batchnum, nnz,
&one,
descr,
THCTensor_(data)(state, values),
THCudaIntTensor_data(state, colPtrs),
THCudaIntTensor_data(state, rowInds),
THCTensor_(data)(state, buf), batchnum,
&one, THCTensor_(data)(state, gradWeight), inDim
);
THCTensor_(sum)(state, buf, gradOutput, 0, 1);
THCTensor_(resize1d)(state, buf, outDim);
THCTensor_(cadd)(state, gradBias, gradBias, scale, buf);
if (weightDecay != 0)
{
THCTensor_(cadd)(state, gradWeight, gradWeight, weightDecay, weight);
THCTensor_(cadd)(state, gradBias, gradBias, weightDecay, bias);
}
THCTensor_(free)(state, weight);
THCTensor_(free)(state, buf);
THCTensor_(free)(state, sel);
THCTensor_(free)(state, cols);
THCudaLongTensor_free(state, inds);
THCTensor_(free)(state, values);
THCudaIntTensor_free(state, colbuf);
THCudaIntTensor_free(state, rowInds);
THCudaIntTensor_free(state, colPtrs);
}
void THNN_(SparseLinear_legacyUpdateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias) {
THError("CUDA does not support legacy input format, please use a table of nnz x 2 vectors");
}
void THNN_(SparseLinear_legacyAccGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *weight,
THCTensor *bias,
accreal weightDecay,
accreal scale) {
THError("CUDA does not support legacy input format, please use a table of nnz x 2 vectors");
}
// Dense updates are pretty fast on the GPU
void THNN_(SparseLinear_zeroGradParameters)(
THCState *state,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *lastInput) {
THCTensor_(zero)(state, gradWeight);
THCTensor_(zero)(state, gradBias);
}
void THNN_(SparseLinear_updateParameters)(
THCState *state,
THCTensor *weight,
THCTensor *bias,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *lastInput,
accreal learningRate) {
THCTensor_(cadd)(state, weight, weight, -learningRate, gradWeight);
THCTensor_(cadd)(state, bias, bias, -learningRate, gradBias);
}
#endif
| e1a0630de4b91b727c7aac2c4807caf7d08321c6.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SparseLinear.cu"
#else
static bool THNN_(checkInput)(THCTensor* t)
{
return !t->is_empty() && t->_dim() == 2 && t->size[1] == 3;
}
static bool THNN_(checkSize2D)(THCTensor* t, int64_t size0, int64_t size1)
{
return !t->is_empty() && t->_dim() == 2 && t->size[0] == size0 && t->size[1] == size1;
}
static bool THNN_(checkSize1D)(THCTensor* t, int64_t size0)
{
return !t->is_empty() && t->_dim() == 1 && t->size[0] == size0;
}
static inline void THNN_(copyCudaFloatingType)(THCState *state, THCudaIntTensor *buf, THCTensor *t) {
#ifdef THC_REAL_IS_FLOAT
THCudaIntTensor_copyCudaFloat(state, buf, t);
#elif defined(THC_REAL_IS_DOUBLE)
THCudaIntTensor_copyCudaDouble(state, buf, t);
#elif defined(THC_REAL_IS_HALF)
THCudaIntTensor_copyCudaHalf(state, buf, t);
#endif
}
void THNN_(SparseLinear_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias)
{
THAssert(THCTensor_(checkGPU)(state, 4, input, output, weight, bias));
int64_t h;
int64_t outDim = THCTensor_(size)(state, weight, 0);
int64_t inDim = THCTensor_(size)(state, weight, 1);
THArgCheck(THNN_(checkInput)(input), 2, "input size must be nnz x 3");
AT_CHECK(!output->is_empty() && THCTensor_(nDimension)(state, output) == 2,
"output must be batchsize x outputsize, got size: ", output->sizes());
THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");
weight = THCTensor_(newContiguous)(state, weight);
int64_t batchnum = THCTensor_(size)(state, output, 0);
int64_t nnz = THCTensor_(size)(state, input, 0);
THCTensor *buffer = THCTensor_(new)(state);
THCTensor *sel = THCTensor_(new)(state);
THCTensor *values = THCTensor_(new)(state);
THCudaIntTensor *rowbuf = THCudaIntTensor_new(state);
THCudaIntTensor *csrPtrs = THCudaIntTensor_new(state);
THCudaIntTensor *colInds = THCudaIntTensor_new(state);
THCTensor_(resize1d)(state, values, nnz);
THCudaIntTensor_resize1d(state, rowbuf, nnz);
THCudaIntTensor_resize1d(state, colInds, nnz);
THCudaIntTensor_resize1d(state, csrPtrs, batchnum+1);
// Get data ready for cusparse, need CudaInt buffers
// We do not need to sort, since rows are already in order
// If rows might get out of order in future implementations, or if cusparse
// complains with an illegal memory access, sort like we do in AccGradParameters
THCTensor_(select)(state, sel, input, 1, 0);
THNN_(copyCudaFloatingType)(state, rowbuf, sel);
THCTensor_(select)(state, sel, input, 1, 1);
THNN_(copyCudaFloatingType)(state, colInds, sel);
THCTensor_(select)(state, sel, input, 1, 2);
THCTensor_(copyCuda)(state, values, sel);
init_cusparse();
cusparseXcoo2csr(cusparse_handle,
THCudaIntTensor_data(state, rowbuf), nnz, batchnum,
THCudaIntTensor_data(state, csrPtrs), CUSPARSE_INDEX_BASE_ONE);
// output = bias
THCTensor_(resize2d)(state, buffer, outDim, batchnum);
THCTensor_(zero)(state, buffer);
for (h=0; h<batchnum; h++) {
THCTensor_(select)(state, sel, buffer, 1, h);
THCTensor_(copy)(state, sel, bias);
}
// output = W * x
real one = ScalarConvert<int, real>::to(1);
cusparseMatDescr_t descr = 0;
cusparseCreateMatDescr(&descr);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ONE);
#ifdef THC_REAL_IS_FLOAT
cusparseScsrmm(cusparse_handle,
#elif defined(THC_REAL_IS_DOUBLE)
cusparseDcsrmm(cusparse_handle,
#endif
CUSPARSE_OPERATION_NON_TRANSPOSE,
batchnum, outDim, inDim, nnz,
&one,
descr,
THCTensor_(data)(state, values),
THCudaIntTensor_data(state, csrPtrs),
THCudaIntTensor_data(state, colInds),
THCTensor_(data)(state, weight), inDim,
&one, THCTensor_(data)(state, buffer), batchnum
);
THCTensor_(transpose)(state, buffer, NULL, 0, 1);
// We do work in the buffer to keep the output contiguous
THCTensor_(copy)(state, output, buffer);
cusparseDestroyMatDescr(descr);
descr = 0;
THCTensor_(free)(state, buffer);
THCTensor_(free)(state, sel);
THCTensor_(free)(state, values);
THCTensor_(free)(state, weight);
THCudaIntTensor_free(state, rowbuf);
THCudaIntTensor_free(state, colInds);
THCudaIntTensor_free(state, csrPtrs);
}
void THNN_(SparseLinear_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *weight,
THCTensor *bias,
accreal weightDecay,
accreal scale)
{
int64_t outDim = THCTensor_(size)(state, weight, 0);
int64_t inDim = THCTensor_(size)(state, weight, 1);
  THArgCheck(THNN_(checkInput)(input), 2, "input size must be nnz x 3");
THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, "gradWeight size wrong");
THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
weight = THCTensor_(newContiguous)(state, weight);
int64_t nnz = THCTensor_(size)(state, input, 0);
int64_t batchnum = THCTensor_(size)(state, gradOutput, 0);
THCTensor *buf = THCTensor_(new)(state);
THCTensor *cols = THCTensor_(new)(state);
THCTensor *sel = THCTensor_(new)(state);
THCudaLongTensor *inds = THCudaLongTensor_new(state);
THCTensor *values = THCTensor_(new)(state);
THCudaIntTensor *colbuf = THCudaIntTensor_new(state);
THCudaIntTensor *colPtrs = THCudaIntTensor_new(state);
THCudaIntTensor *rowInds = THCudaIntTensor_new(state);
THCTensor_(select)(state, sel, input, 1, 0); // rowInds
THCTensor_(select)(state, cols, input, 1, 1); // colInds
  THCTensor_(cadd)(state, buf, sel, batchnum, cols); // colInds * batchnum + rowInds
THCTensor_(sort)(state, buf, inds, buf, 0, 0); // Indices are now in ind
THCTensor_(indexSelect)(state, buf, input, 0, inds);
THCTensor_(resize1d)(state, values, nnz);
THCudaIntTensor_resize1d(state, colbuf, nnz);
THCudaIntTensor_resize1d(state, rowInds, nnz);
THCudaIntTensor_resize1d(state, colPtrs, inDim+1);
// Get data ready for cusparse, need CudaInt buffers
THCTensor_(select)(state, sel, buf, 1, 0);
THNN_(copyCudaFloatingType)(state, rowInds, sel);
THCTensor_(select)(state, sel, buf, 1, 1);
THNN_(copyCudaFloatingType)(state, colbuf, sel);
THCTensor_(select)(state, sel, buf, 1, 2);
THCTensor_(copyCuda)(state, values, sel);
init_cusparse();
  // Secretly coo2csc: running coo2csr on the column indices yields the CSC column pointers
cusparseXcoo2csr(cusparse_handle,
THCudaIntTensor_data(state, colbuf), nnz, inDim,
THCudaIntTensor_data(state, colPtrs), CUSPARSE_INDEX_BASE_ONE);
  // FORTRAN expects contiguous col-major matrices
THCTensor *tgradOutput = THCTensor_(new)(state);
THCTensor_(transpose)(state, tgradOutput, gradOutput, 0, 1);
THCTensor_(resize2d)(state, buf, batchnum, outDim);
THCTensor_(copy)(state, buf, tgradOutput);
THCTensor_(free)(state, tgradOutput);
real one = ScalarConvert<int, real>::to(1);
cusparseMatDescr_t descr = 0;
cusparseCreateMatDescr(&descr);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ONE);
#ifdef THC_REAL_IS_FLOAT
cusparseScsrmm(cusparse_handle,
#elif defined(THC_REAL_IS_DOUBLE)
cusparseDcsrmm(cusparse_handle,
#endif
CUSPARSE_OPERATION_NON_TRANSPOSE,
inDim, outDim, batchnum, nnz,
&one,
descr,
THCTensor_(data)(state, values),
THCudaIntTensor_data(state, colPtrs),
THCudaIntTensor_data(state, rowInds),
THCTensor_(data)(state, buf), batchnum,
&one, THCTensor_(data)(state, gradWeight), inDim
);
THCTensor_(sum)(state, buf, gradOutput, 0, 1);
THCTensor_(resize1d)(state, buf, outDim);
THCTensor_(cadd)(state, gradBias, gradBias, scale, buf);
if (weightDecay != 0)
{
THCTensor_(cadd)(state, gradWeight, gradWeight, weightDecay, weight);
THCTensor_(cadd)(state, gradBias, gradBias, weightDecay, bias);
}
THCTensor_(free)(state, weight);
THCTensor_(free)(state, buf);
THCTensor_(free)(state, sel);
THCTensor_(free)(state, cols);
THCudaLongTensor_free(state, inds);
THCTensor_(free)(state, values);
THCudaIntTensor_free(state, colbuf);
THCudaIntTensor_free(state, rowInds);
THCudaIntTensor_free(state, colPtrs);
}
void THNN_(SparseLinear_legacyUpdateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias) {
THError("CUDA does not support legacy input format, please use a table of nnz x 2 vectors");
}
void THNN_(SparseLinear_legacyAccGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *weight,
THCTensor *bias,
accreal weightDecay,
accreal scale) {
THError("CUDA does not support legacy input format, please use a table of nnz x 2 vectors");
}
// Dense updates are pretty fast on the GPU
void THNN_(SparseLinear_zeroGradParameters)(
THCState *state,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *lastInput) {
THCTensor_(zero)(state, gradWeight);
THCTensor_(zero)(state, gradBias);
}
void THNN_(SparseLinear_updateParameters)(
THCState *state,
THCTensor *weight,
THCTensor *bias,
THCTensor *gradWeight,
THCTensor *gradBias,
THCTensor *lastInput,
accreal learningRate) {
THCTensor_(cadd)(state, weight, weight, -learningRate, gradWeight);
THCTensor_(cadd)(state, bias, bias, -learningRate, gradBias);
}
#endif
|
78125d01c7d25fcf23b4a0900899f14bd420997d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hipfft.h>
#include <hip/hip_runtime.h>
#include <ctime>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "BeatCalculatorParallel.h"
#ifndef LIBINC
#define LIBINC
#include <mpg123.h>
#include <kiss_fftr.h>
#endif
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void differentiate_kernel(int size, float* array, hipfftReal* differentiated) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index == 0 || index == size - 1) {
differentiated[index] = array[index];
}
else if (index < size) {
differentiated[index] = 44100 * (array[index+1]-array[index-1])/2;
}
}
//TODO: look up best way to reduce an array with CUDA
// Currently use first thread in each block to reduce the array corresponding to that block, then return size N array
// Once best found, make this function return single integer
__global__ void calculate_energy(hipfftComplex* sample, hipfftComplex* combs, double* tempEnergies, double* energies, int sample_size, int N) {
int combIdx = blockIdx.x * sample_size;
int sampleIdx = threadIdx.x;
if (sampleIdx < sample_size) {
        // Complex multiply of the sample spectrum with the comb spectrum; keep the
        // products in floating point so the squared magnitude below is not truncated.
        float a = sample[sampleIdx].x * combs[combIdx + sampleIdx].x - sample[sampleIdx].y * combs[combIdx + sampleIdx].y;
        float b = sample[sampleIdx].x * combs[combIdx + sampleIdx].y + sample[sampleIdx].y * combs[combIdx + sampleIdx].x;
tempEnergies[combIdx + sampleIdx] = a * a + b * b;
}
__syncthreads();
if (sampleIdx == 0) {
double energy = 0;
for (int i=0; i < sample_size; i++) {
energy += tempEnergies[combIdx+i];
}
energies[blockIdx.x] = energy;
}
}
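// A minimal sketch of the block-wide reduction the TODO above asks about, assuming
// blockDim.x is a power of two (e.g. 512) and one block per comb filter. Each thread
// accumulates a strided slice of the filtered energy, so every spectrum element is
// covered even when sample_size exceeds blockDim.x, and a shared-memory tree reduction
// replaces the single-thread sum in calculate_energy. It would need
// blockDim.x * sizeof(double) bytes of dynamic shared memory; nothing below launches it.
__global__ void calculate_energy_reduced(hipfftComplex* sample, hipfftComplex* combs,
                                         double* energies, int sample_size) {
    extern __shared__ double partial[];
    double sum = 0.0;
    int combIdx = blockIdx.x * sample_size;
    for (int i = threadIdx.x; i < sample_size; i += blockDim.x) {
        float a = sample[i].x * combs[combIdx + i].x - sample[i].y * combs[combIdx + i].y;
        float b = sample[i].x * combs[combIdx + i].y + sample[i].y * combs[combIdx + i].x;
        sum += (double)a * a + (double)b * b;
    }
    partial[threadIdx.x] = sum;
    __syncthreads();
    // Tree reduction: halve the number of active threads until one partial sum remains.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) partial[threadIdx.x] += partial[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0) energies[blockIdx.x] = partial[0];
}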
//TODO: write kernel function to do this
void generateCombs(int BPM_init, int N, int size, int AmpMax, hipfftReal* hostDataIn) {
for(int i = 0; i < N; i++) {
int BPM = BPM_init + i;
int Ti = 60 * 44100/BPM;
        int start = size * i; // compute offset for this comb filter
for(int k = 0; k < size; k+=2) {
if (k % Ti == 0) {
hostDataIn[start+k] = AmpMax;
hostDataIn[start+k+1] = AmpMax;
}
else {
hostDataIn[start+k] = 0;
hostDataIn[start+k+1] = 0;
}
}
}
}
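// A minimal device-side sketch of what the TODO above asks for, assuming a launch of
// dim3 grid((size/2 + blockDim.x - 1) / blockDim.x, N) with one comb filter per grid row
// and an even `size`. Illustrative only; the host loop above remains the version used.
__global__ void generate_combs_kernel(hipfftReal* data, int BPM_init, int N, int size,
                                      int AmpMax) {
    int comb = blockIdx.y;                               // which comb filter
    int k = 2 * (blockIdx.x * blockDim.x + threadIdx.x); // even sample index in that comb
    if (comb < N && k + 1 < size) {
        int Ti = 60 * 44100 / (BPM_init + comb);         // impulse period for this BPM
        hipfftReal v = (k % Ti == 0) ? (hipfftReal)AmpMax : 0.f;
        data[comb * size + k] = v;
        data[comb * size + k + 1] = v;
    }
}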
void combFilterFFT(int BPM_init, int BPM_final, int N, int fft_input_size, hipfftComplex* deviceDataOut) {
// Assign Variables
hipfftHandle plan;
hipfftReal* deviceDataIn, *hostDataIn;
int AmpMax = 2147483647;
// Malloc Variables
gpuErrchk( hipMalloc(&deviceDataIn, sizeof(hipfftReal) * fft_input_size * N) );
hostDataIn = (hipfftReal*)malloc(sizeof(hipfftReal) * fft_input_size * N);
//Generate all Combs
generateCombs(BPM_init, N, fft_input_size, AmpMax, hostDataIn);
int n[1] = {fft_input_size};
clock_t begin = clock();
gpuErrchk( hipMemcpy(deviceDataIn, hostDataIn, fft_input_size * N * sizeof(hipfftReal), hipMemcpyHostToDevice) );
clock_t end = clock();
printf("Combfilter copy time: %f\n", double(end-begin)/CLOCKS_PER_SEC);
// Now run the fft
if (hipfftPlanMany(&plan, 1, n, NULL, 1, fft_input_size, NULL, 1, fft_input_size, HIPFFT_R2C, N) != HIPFFT_SUCCESS) {
printf("CUFFT Error - plan creation failed\n");
exit(-1);
}
if (hipfftExecR2C(plan, deviceDataIn, deviceDataOut) != HIPFFT_SUCCESS) {
printf("CUFFT Error - execution of FFT failed\n");
exit(-1);
}
gpuErrchk( hipDeviceSynchronize() );
// Cleanup
if (hipfftDestroy(plan) != HIPFFT_SUCCESS) {
printf("CUFFT Error - plan destruction failed\n");
exit(-1);
}
gpuErrchk( hipFree(deviceDataIn) );
free(hostDataIn);
return;
}
int combFilterAnalysis(hipfftComplex* sample, hipfftComplex* combs, int out_size, int N) {
//Launch a kernel to calculate the instant energy at position $k$ in the filtered sample, for all k, for all N filters
//Run Kernel to determine energies
double *tempEnergies, *deviceEnergies, *hostEnergies;
gpuErrchk( hipMalloc(&tempEnergies, sizeof(double) * out_size * N) );
gpuErrchk( hipMalloc(&deviceEnergies, sizeof(double) * N) );
hostEnergies = (double*)malloc(N * sizeof(double));
const int blocks = N; //want a block for each comb
const int tpb = 512;
hipLaunchKernelGGL(( calculate_energy), dim3(blocks), dim3(tpb), 0, 0, sample, combs, tempEnergies, deviceEnergies, out_size, N);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
//free temp array
gpuErrchk( hipFree(tempEnergies) );
    clock_t begin = clock();
//Loop through final array to find the best one
gpuErrchk( hipMemcpy(hostEnergies, deviceEnergies, sizeof(double) * N, hipMemcpyDeviceToHost) );
clock_t end = clock();
printf("Time elapsed during memcpy: %f\n", double(end-begin)/CLOCKS_PER_SEC);
    // Find the comb filter with the maximum energy
double max = -1;
int index = -1;
for (int i = 0; i < N-1; i++) {
//printf("BPM: %d \t Energy: %f \n", 60 + i, hostEnergies[i]);
if (hostEnergies[i] > max) {
max = hostEnergies[i];
index = i;
}
}
gpuErrchk( hipFree(deviceEnergies) );
free(hostEnergies);
return 60 + index;
}
int BeatCalculatorParallel::cuda_detect_beat(char* s, int sample_size) {
clock_t bt = clock();
int max_freq = sample_size/4.4;
int threadsPerBlock = 512;
int blocks = (sample_size + threadsPerBlock - 1)/threadsPerBlock;
// Load mp3
float* sample = (float*)malloc(sizeof(float) * sample_size);
clock_t bm = clock();
readMP3(s, sample, sample_size);
clock_t em = clock();
printf("Elapsed time reading mp3: %f\n", double(em-bm)/CLOCKS_PER_SEC);
bm = clock();
// Step 2: Differentiate
float* deviceSample;
hipfftReal* deviceDifferentiatedSample;
gpuErrchk( hipMalloc(&deviceSample, sizeof(float) * sample_size));
gpuErrchk( hipMalloc(&deviceDifferentiatedSample, sizeof(hipfftReal) * sample_size));
gpuErrchk( hipMemcpy(deviceSample, sample, sample_size * sizeof(float), hipMemcpyHostToDevice));
em = clock();
printf("Elapsed time initial memcpy: %f\n", double(em-bm)/CLOCKS_PER_SEC);
clock_t begin = clock();
//free sample array on host
free(sample);
//differentiate sample on device
hipLaunchKernelGGL(( differentiate_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, sample_size, deviceSample, deviceDifferentiatedSample);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk( hipFree(deviceSample) );
// Perform FFT
hipfftHandle plan1D;
hipfftComplex* deviceFFTOut;
int out_size = sample_size/2 + 1;
gpuErrchk( hipMalloc(&deviceFFTOut, sizeof(hipfftComplex) * out_size));
if (hipfftPlan1d(&plan1D, sample_size, HIPFFT_R2C, 1) != HIPFFT_SUCCESS) {
printf("CUFFT Error - plan creation failed\n");
return 0;
}
if (hipfftExecR2C(plan1D, deviceDifferentiatedSample, deviceFFTOut) != HIPFFT_SUCCESS) {
printf("CUFFT Error - execution of FFT failed\n");
return 0;
}
//free diff'd sample (we don't need it anymore)
gpuErrchk( hipFree(deviceDifferentiatedSample) );
//Create Combs + FFT them
hipfftComplex* combFFTOut;
int BPM_init = 60;
int BPM_final = 210;
int N = (BPM_final - BPM_init);
gpuErrchk( hipMalloc(&combFFTOut, sizeof(hipfftComplex) * out_size * N) );
combFilterFFT(BPM_init, BPM_final, N, sample_size, combFFTOut);
//perform analysis
int BPM = combFilterAnalysis(deviceFFTOut, combFFTOut, out_size, N);
clock_t end = clock();
printf("Elapsed time: %f\n", double(end - begin)/CLOCKS_PER_SEC);
gpuErrchk(hipFree(combFFTOut));
gpuErrchk(hipFree(deviceFFTOut));
clock_t et = clock();
printf("Total Time: %f\n", double(bt-et)/CLOCKS_PER_SEC);
return BPM;
}
| 78125d01c7d25fcf23b4a0900899f14bd420997d.cu | #include <stdio.h>
#include <stdlib.h>
#include <cufft.h>
#include <cuda.h>
#include <ctime>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "BeatCalculatorParallel.h"
#ifndef LIBINC
#define LIBINC
#include <mpg123.h>
#include <kiss_fftr.h>
#endif
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void differentiate_kernel(int size, float* array, cufftReal* differentiated) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index == 0 || index == size - 1) {
differentiated[index] = array[index];
}
else if (index < size) {
differentiated[index] = 44100 * (array[index+1]-array[index-1])/2;
}
}
//TODO: look up best way to reduce an array with CUDA
// Currently use first thread in each block to reduce the array corresponding to that block, then return size N array
// Once best found, make this function return single integer
__global__ void calculate_energy(cufftComplex* sample, cufftComplex* combs, double* tempEnergies, double* energies, int sample_size, int N) {
int combIdx = blockIdx.x * sample_size;
int sampleIdx = threadIdx.x;
if (sampleIdx < sample_size) {
        // Complex multiply of the sample spectrum with the comb spectrum; keep the
        // products in floating point so the squared magnitude below is not truncated.
        float a = sample[sampleIdx].x * combs[combIdx + sampleIdx].x - sample[sampleIdx].y * combs[combIdx + sampleIdx].y;
        float b = sample[sampleIdx].x * combs[combIdx + sampleIdx].y + sample[sampleIdx].y * combs[combIdx + sampleIdx].x;
tempEnergies[combIdx + sampleIdx] = a * a + b * b;
}
__syncthreads();
if (sampleIdx == 0) {
double energy = 0;
for (int i=0; i < sample_size; i++) {
energy += tempEnergies[combIdx+i];
}
energies[blockIdx.x] = energy;
}
}
//TODO: write kernel function to do this
void generateCombs(int BPM_init, int N, int size, int AmpMax, cufftReal* hostDataIn) {
for(int i = 0; i < N; i++) {
int BPM = BPM_init + i;
int Ti = 60 * 44100/BPM;
        int start = size * i; // compute offset for this comb filter
for(int k = 0; k < size; k+=2) {
if (k % Ti == 0) {
hostDataIn[start+k] = AmpMax;
hostDataIn[start+k+1] = AmpMax;
}
else {
hostDataIn[start+k] = 0;
hostDataIn[start+k+1] = 0;
}
}
}
}
void combFilterFFT(int BPM_init, int BPM_final, int N, int fft_input_size, cufftComplex* deviceDataOut) {
// Assign Variables
cufftHandle plan;
cufftReal* deviceDataIn, *hostDataIn;
int AmpMax = 2147483647;
// Malloc Variables
gpuErrchk( cudaMalloc(&deviceDataIn, sizeof(cufftReal) * fft_input_size * N) );
hostDataIn = (cufftReal*)malloc(sizeof(cufftReal) * fft_input_size * N);
//Generate all Combs
generateCombs(BPM_init, N, fft_input_size, AmpMax, hostDataIn);
int n[1] = {fft_input_size};
clock_t begin = clock();
gpuErrchk( cudaMemcpy(deviceDataIn, hostDataIn, fft_input_size * N * sizeof(cufftReal), cudaMemcpyHostToDevice) );
clock_t end = clock();
printf("Combfilter copy time: %f\n", double(end-begin)/CLOCKS_PER_SEC);
// Now run the fft
if (cufftPlanMany(&plan, 1, n, NULL, 1, fft_input_size, NULL, 1, fft_input_size, CUFFT_R2C, N) != CUFFT_SUCCESS) {
printf("CUFFT Error - plan creation failed\n");
exit(-1);
}
if (cufftExecR2C(plan, deviceDataIn, deviceDataOut) != CUFFT_SUCCESS) {
printf("CUFFT Error - execution of FFT failed\n");
exit(-1);
}
gpuErrchk( cudaDeviceSynchronize() );
// Cleanup
if (cufftDestroy(plan) != CUFFT_SUCCESS) {
printf("CUFFT Error - plan destruction failed\n");
exit(-1);
}
gpuErrchk( cudaFree(deviceDataIn) );
free(hostDataIn);
return;
}
int combFilterAnalysis(cufftComplex* sample, cufftComplex* combs, int out_size, int N) {
//Launch a kernel to calculate the instant energy at position $k$ in the filtered sample, for all k, for all N filters
//Run Kernel to determine energies
double *tempEnergies, *deviceEnergies, *hostEnergies;
gpuErrchk( cudaMalloc(&tempEnergies, sizeof(double) * out_size * N) );
gpuErrchk( cudaMalloc(&deviceEnergies, sizeof(double) * N) );
hostEnergies = (double*)malloc(N * sizeof(double));
const int blocks = N; //want a block for each comb
const int tpb = 512;
calculate_energy<<<blocks, tpb>>>(sample, combs, tempEnergies, deviceEnergies, out_size, N);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
//free temp array
gpuErrchk( cudaFree(tempEnergies) );
    clock_t begin = clock();
//Loop through final array to find the best one
gpuErrchk( cudaMemcpy(hostEnergies, deviceEnergies, sizeof(double) * N, cudaMemcpyDeviceToHost) );
clock_t end = clock();
printf("Time elapsed during memcpy: %f\n", double(end-begin)/CLOCKS_PER_SEC);
    // Find the comb filter with the maximum energy
double max = -1;
int index = -1;
for (int i = 0; i < N-1; i++) {
//printf("BPM: %d \t Energy: %f \n", 60 + i, hostEnergies[i]);
if (hostEnergies[i] > max) {
max = hostEnergies[i];
index = i;
}
}
gpuErrchk( cudaFree(deviceEnergies) );
free(hostEnergies);
return 60 + index;
}
int BeatCalculatorParallel::cuda_detect_beat(char* s, int sample_size) {
clock_t bt = clock();
int max_freq = sample_size/4.4;
int threadsPerBlock = 512;
int blocks = (sample_size + threadsPerBlock - 1)/threadsPerBlock;
// Load mp3
float* sample = (float*)malloc(sizeof(float) * sample_size);
clock_t bm = clock();
readMP3(s, sample, sample_size);
clock_t em = clock();
printf("Elapsed time reading mp3: %f\n", double(em-bm)/CLOCKS_PER_SEC);
bm = clock();
// Step 2: Differentiate
float* deviceSample;
cufftReal* deviceDifferentiatedSample;
gpuErrchk( cudaMalloc(&deviceSample, sizeof(float) * sample_size));
gpuErrchk( cudaMalloc(&deviceDifferentiatedSample, sizeof(cufftReal) * sample_size));
gpuErrchk( cudaMemcpy(deviceSample, sample, sample_size * sizeof(float), cudaMemcpyHostToDevice));
em = clock();
printf("Elapsed time initial memcpy: %f\n", double(em-bm)/CLOCKS_PER_SEC);
clock_t begin = clock();
//free sample array on host
free(sample);
//differentiate sample on device
differentiate_kernel<<<blocks, threadsPerBlock>>>(sample_size, deviceSample, deviceDifferentiatedSample);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk( cudaFree(deviceSample) );
// Perform FFT
cufftHandle plan1D;
cufftComplex* deviceFFTOut;
int out_size = sample_size/2 + 1;
gpuErrchk( cudaMalloc(&deviceFFTOut, sizeof(cufftComplex) * out_size));
if (cufftPlan1d(&plan1D, sample_size, CUFFT_R2C, 1) != CUFFT_SUCCESS) {
printf("CUFFT Error - plan creation failed\n");
return 0;
}
if (cufftExecR2C(plan1D, deviceDifferentiatedSample, deviceFFTOut) != CUFFT_SUCCESS) {
printf("CUFFT Error - execution of FFT failed\n");
return 0;
}
//free diff'd sample (we don't need it anymore)
gpuErrchk( cudaFree(deviceDifferentiatedSample) );
//Create Combs + FFT them
cufftComplex* combFFTOut;
int BPM_init = 60;
int BPM_final = 210;
int N = (BPM_final - BPM_init);
gpuErrchk( cudaMalloc(&combFFTOut, sizeof(cufftComplex) * out_size * N) );
combFilterFFT(BPM_init, BPM_final, N, sample_size, combFFTOut);
//perform analysis
int BPM = combFilterAnalysis(deviceFFTOut, combFFTOut, out_size, N);
clock_t end = clock();
printf("Elapsed time: %f\n", double(end - begin)/CLOCKS_PER_SEC);
gpuErrchk(cudaFree(combFFTOut));
gpuErrchk(cudaFree(deviceFFTOut));
clock_t et = clock();
printf("Total Time: %f\n", double(bt-et)/CLOCKS_PER_SEC);
return BPM;
}
|
faa5f84883af3c954e5ff5bb5a6c496e619c86be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/arg_ops.h"
#include <limits>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
using KeyValuePair = hipcub::KeyValuePair<TIndex, T>;
template <typename T>
using BlockReduce = hipcub::BlockReduce<KeyValuePair<T>, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class ReduceOp>
__global__ void ComputeArgCUDAKernel(
const T* X,
const TIndex outer_size,
const TIndex inner_size,
const TIndex stride,
const ReduceOp& reduce_op,
const T init,
TIndex* Y) {
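  // Grid-strided over the outer_size output positions: idx / stride indexes the leading
  // (pre-axis) dimensions and idx % stride the trailing ones, while the reduced axis of
  // length inner_size is walked with a block-strided loop and collapsed by BlockReduce
  // with ArgMax/ArgMin as the reduction operator.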
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (TIndex idx = blockIdx.x; idx < outer_size; idx += gridDim.x) {
const TIndex i = idx / stride;
const TIndex j = idx % stride;
KeyValuePair<T> kv = {-1, init};
for (TIndex k = threadIdx.x; k < inner_size; k += blockDim.x) {
kv = reduce_op({k, X[i * inner_size * stride + k * stride + j]}, kv);
}
kv = BlockReduce<T>(temp_storage).Reduce(kv, reduce_op);
if (threadIdx.x == 0) {
Y[idx] = kv.key;
}
__syncthreads();
}
}
} // namespace
template <typename T>
class ArgMaxOp<T, CUDAContext> final : public ArgOpBase<T, CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
ArgMaxOp(const OperatorDef& operator_def, Workspace* ws)
: ArgOpBase<T, CUDAContext>(operator_def, ws) {}
protected:
bool Compute(
const T* X,
const TIndex prev_size,
const TIndex next_size,
const TIndex n,
TIndex* Y) override;
};
template <typename T>
bool ArgMaxOp<T, CUDAContext>::Compute(
const T* X,
const TIndex prev_size,
const TIndex next_size,
const TIndex n,
TIndex* Y) {
const TIndex outer_size = prev_size * next_size;
hipLaunchKernelGGL(( ComputeArgCUDAKernel),
dim3(::min(outer_size, static_cast<TIndex>(CAFFE_MAXIMUM_NUM_BLOCKS))),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X,
outer_size,
n,
next_size,
hipcub::ArgMax(),
std::numeric_limits<T>::lowest(),
Y);
return true;
}
template <typename T>
class ArgMinOp<T, CUDAContext> final : public ArgOpBase<T, CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
ArgMinOp(const OperatorDef& operator_def, Workspace* ws)
: ArgOpBase<T, CUDAContext>(operator_def, ws) {}
protected:
bool Compute(
const T* X,
const TIndex prev_size,
const TIndex next_size,
const TIndex n,
TIndex* Y) override;
};
template <typename T>
bool ArgMinOp<T, CUDAContext>::Compute(
const T* X,
const TIndex prev_size,
const TIndex next_size,
const TIndex n,
TIndex* Y) {
const TIndex outer_size = prev_size * next_size;
hipLaunchKernelGGL(( ComputeArgCUDAKernel),
dim3(::min(outer_size, static_cast<TIndex>(CAFFE_MAXIMUM_NUM_BLOCKS))),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X,
outer_size,
n,
next_size,
hipcub::ArgMin(),
std::numeric_limits<T>::max(),
Y);
return true;
}
REGISTER_CUDA_OPERATOR(ArgMax, ArgMaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ArgMin, ArgMinOp<float, CUDAContext>);
} // namespace caffe2
| faa5f84883af3c954e5ff5bb5a6c496e619c86be.cu | #include "caffe2/operators/arg_ops.h"
#include <limits>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
using KeyValuePair = cub::KeyValuePair<TIndex, T>;
template <typename T>
using BlockReduce = cub::BlockReduce<KeyValuePair<T>, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class ReduceOp>
__global__ void ComputeArgCUDAKernel(
const T* X,
const TIndex outer_size,
const TIndex inner_size,
const TIndex stride,
const ReduceOp& reduce_op,
const T init,
TIndex* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (TIndex idx = blockIdx.x; idx < outer_size; idx += gridDim.x) {
const TIndex i = idx / stride;
const TIndex j = idx % stride;
KeyValuePair<T> kv = {-1, init};
for (TIndex k = threadIdx.x; k < inner_size; k += blockDim.x) {
kv = reduce_op({k, X[i * inner_size * stride + k * stride + j]}, kv);
}
kv = BlockReduce<T>(temp_storage).Reduce(kv, reduce_op);
if (threadIdx.x == 0) {
Y[idx] = kv.key;
}
__syncthreads();
}
}
} // namespace
template <typename T>
class ArgMaxOp<T, CUDAContext> final : public ArgOpBase<T, CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
ArgMaxOp(const OperatorDef& operator_def, Workspace* ws)
: ArgOpBase<T, CUDAContext>(operator_def, ws) {}
protected:
bool Compute(
const T* X,
const TIndex prev_size,
const TIndex next_size,
const TIndex n,
TIndex* Y) override;
};
template <typename T>
bool ArgMaxOp<T, CUDAContext>::Compute(
const T* X,
const TIndex prev_size,
const TIndex next_size,
const TIndex n,
TIndex* Y) {
const TIndex outer_size = prev_size * next_size;
ComputeArgCUDAKernel<<<
std::min(outer_size, static_cast<TIndex>(CAFFE_MAXIMUM_NUM_BLOCKS)),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X,
outer_size,
n,
next_size,
cub::ArgMax(),
std::numeric_limits<T>::lowest(),
Y);
return true;
}
template <typename T>
class ArgMinOp<T, CUDAContext> final : public ArgOpBase<T, CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
ArgMinOp(const OperatorDef& operator_def, Workspace* ws)
: ArgOpBase<T, CUDAContext>(operator_def, ws) {}
protected:
bool Compute(
const T* X,
const TIndex prev_size,
const TIndex next_size,
const TIndex n,
TIndex* Y) override;
};
template <typename T>
bool ArgMinOp<T, CUDAContext>::Compute(
const T* X,
const TIndex prev_size,
const TIndex next_size,
const TIndex n,
TIndex* Y) {
const TIndex outer_size = prev_size * next_size;
ComputeArgCUDAKernel<<<
std::min(outer_size, static_cast<TIndex>(CAFFE_MAXIMUM_NUM_BLOCKS)),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X,
outer_size,
n,
next_size,
cub::ArgMin(),
std::numeric_limits<T>::max(),
Y);
return true;
}
REGISTER_CUDA_OPERATOR(ArgMax, ArgMaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ArgMin, ArgMinOp<float, CUDAContext>);
} // namespace caffe2
|
519bd9928fb1891ba465314afa2288310134cd9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/utils/device/block.hpp"
#include "pcl/gpu/utils/device/limits.hpp"
#include "pcl/gpu/utils/device/vector_math.hpp"
#include "pcl/gpu/utils/device/functional.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "thrust/transform.h"
#include "thrust/device_ptr.h"
namespace pcl
{
namespace device
{
//[spinimage][angles] = [0..FSize][..FSize]
extern __shared__ float simage_angles[];
template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return *(float3*)&ptr[index]; }
//template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return tr(ptr[index]); }
struct UseCustomAxis
{
float3 rotation_axis;
__device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& /*normal*/) const { return rotation_axis; }
};
struct UseCustomAxesCloud
{
const NormalType* rotation_axes_cloud;
__device__ __forceinline__ float3 getRotationAxes(int index, const float3& /*normal*/) const { return fetch(rotation_axes_cloud, index); }
};
struct UseOriginNormal
{
__device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& normal) const { return normal; }
};
struct Div12eps
{
__device__ __forceinline__ float operator()(float v1, float v2) const { return (float)(v1 / ( v2 + numeric_limits<double>::epsilon() )); }
};
struct DivValIfNonZero
{
float val;
__device__ __forceinline__ DivValIfNonZero(float value) : val(value) {}
__device__ __forceinline__ float operator()(float v) const { return val == 0 ? v : v/val; }
};
template<bool radial, bool angular, typename AxesStrategy>
struct SpinImpl : public AxesStrategy
{
enum
{
CTA_SIZE = 192
};
int work_size;
const int* indices;
const PointType* input_cloud;
const NormalType* input_normals;
const PointType* surface;
const NormalType* normals;
PtrStep<int> neighbor_indices;
const int* neighbor_indices_sizes;
float support_angle_cos;
int min_neighb;
int image_width;
float bin_size;
int FSize;
mutable PtrStep<float> output;
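            // The spin image grid has (2 * image_width + 1) beta bins (the signed axial
            // coordinate, or elevation angle in the radial variant) by (image_width + 1)
            // alpha bins (the radial coordinate); that is the FSize computed below and the
            // binning applied in operator().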
static __device__ __host__ __forceinline__ int computeFSize(int image_width)
{
int cols = 1 + image_width * 2;
int rows = 1 + image_width;
return cols * rows;
}
__device__ __forceinline__ void operator()() const
{
int i_input = blockIdx.x + gridDim.x * blockIdx.y;
int index = indices[i_input];
int neighb_count = neighbor_indices_sizes[i_input];
const int *ginds = neighbor_indices.ptr (i_input);
if (neighb_count < min_neighb)
return;
//set zeros to spin image
Block::fill(simage_angles, simage_angles + FSize, 0.f);
if (angular) //set zeros to angles
Block::fill(simage_angles + FSize, simage_angles + FSize + FSize, 0.f);
__syncthreads();
float3 origin_point = fetch(input_cloud, index);
float3 origin_normal = input_normals ? fetch(input_normals, index) : make_float3(0.f, 0.f, 0.f);
origin_normal = normalized_safe(origin_normal); //normalize if non-zero
float3 rotation_axis = AxesStrategy::getRotationAxes(index, origin_normal);
rotation_axis = normalized_safe(rotation_axis); //normalize if non-zero
const float eps = numeric_limits<float>::epsilon ();
for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE)
{
int neighb_index = ginds[i_neighb];
// first, skip the points with distant normals
float cos_between_normals = -2.f;
if (angular || support_angle_cos > 0.f) // not bogus
{
float3 normal = normalized(fetch(normals, neighb_index));
cos_between_normals = dot(origin_normal, normal);
cos_between_normals = fmax (-1.f, fmin (1.f, cos_between_normals));
if (fabs(cos_between_normals) < support_angle_cos) // allow counter-directed normals
continue;
cos_between_normals = fabs(cos_between_normals); // the normal is not used explicitly from now
}
// now compute the coordinate in cylindric coordinate system associated with the origin point
float3 direction = fetch(surface, neighb_index) - origin_point;
float direction_norm = norm (direction);
// ignore the point itself; it does not contribute really
if (direction_norm < 10 * eps)
continue;
// the angle between the normal vector and the direction to the point
float cos_dir_axis = dot(direction, rotation_axis) / direction_norm;
cos_dir_axis = fmax(-1.f, fmin(1.f, cos_dir_axis));
// compute coordinates w.r.t. the reference frame
float beta = numeric_limits<float>::quiet_NaN();
float alpha = numeric_limits<float>::quiet_NaN();
if (radial) // radial spin image structure
{
beta = asinf(cos_dir_axis); // yes, arc sine! to get the angle against tangent, not normal!
alpha = direction_norm;
}
else // rectangular spin-image structure
{
beta = direction_norm * cos_dir_axis;
alpha = direction_norm * sqrt (1.0 - cos_dir_axis*cos_dir_axis);
if (fabs (beta) >= bin_size * image_width || alpha >= bin_size * image_width)
continue; // outside the cylinder
}
// bilinear interpolation
float beta_bin_size = radial ? (PI*0.5f/image_width) : bin_size;
int beta_bin = floorf(beta / beta_bin_size) + image_width;
int alpha_bin = floorf(alpha / bin_size);
//alpha_bin = min(simage_cols, max(0, alpha_bin));
//beta_bin = min(simage_rows, max(0, beta_bin));
if (alpha_bin == image_width) // border points
{
alpha_bin--;
// HACK: to prevent a > 1
alpha = bin_size * (alpha_bin + 1) - eps;
}
if (beta_bin == 2*image_width ) // border points
{
beta_bin--;
// HACK: to prevent b > 1
beta = beta_bin_size * (beta_bin - image_width + 1) - eps;
}
float a = alpha/bin_size - alpha_bin;
float b = beta/beta_bin_size - float(beta_bin-image_width);
incSpinI(alpha_bin, beta_bin, (1-a) * (1-b));
incSpinI(alpha_bin+1, beta_bin, a * (1-b));
incSpinI(alpha_bin, beta_bin+1, (1-a) * b );
incSpinI(alpha_bin+1, beta_bin+1, a * b );
if (angular)
{
                        float angle_between_normals = acos(cos_between_normals);
                        incAngle(alpha_bin,   beta_bin,   angle_between_normals * (1-a) * (1-b));
                        incAngle(alpha_bin+1, beta_bin,   angle_between_normals *    a  * (1-b));
                        incAngle(alpha_bin,   beta_bin+1, angle_between_normals * (1-a) *    b );
                        incAngle(alpha_bin+1, beta_bin+1, angle_between_normals *    a  *    b );
}
} /* for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE) */
__syncthreads();
if (angular)
{
                    //transform sum to average by dividing angle/spinimage element-wise.
                    const float *angles_beg = simage_angles + FSize;
                    const float *angles_end = angles_beg + FSize;
                    const float *images_beg = simage_angles;
                    Block::transfrom(angles_beg, angles_end, images_beg, output.ptr(i_input), Div12eps());
                    ////Block::copy(angles_beg, angles_end, output.ptr(i_input));
//Block::copy(images_beg, images_beg + FSize, output.ptr(i_input));
}
else
{
// copy to compute sum
Block::copy(simage_angles, simage_angles + FSize, simage_angles + FSize);
__syncthreads();
//compute sum
Block::reduce_n(simage_angles + FSize, FSize, pcl::device::plus<float>());
__syncthreads();
float sum = simage_angles[FSize];
Block::transfrom(simage_angles, simage_angles + FSize, output.ptr(i_input), DivValIfNonZero(sum));
}
}
__device__ __forceinline__ void incSpinI(int y, int x, float value) const { atomicAdd(simage_angles + y * (2*image_width + 1) + x, value); }
__device__ __forceinline__ void incAngle(int y, int x, float value) const { atomicAdd(simage_angles+FSize + y * (2*image_width + 1) + x, value); }
};
template<typename Impl>
__global__ void computeSpinKernel(const Impl impl) { impl(); }
template<typename Impl>
inline void computeSpinImages_caller(Impl& impl, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
impl.work_size = (int)indices.size();
impl.indices = indices;
impl.input_cloud = input_cloud;
impl.input_normals = input_normals;
impl.surface = surface;
impl.normals = normals;
impl.neighbor_indices = neighbours;
impl.neighbor_indices_sizes = neighbours.sizes;
impl.min_neighb = min_neighb;
impl.image_width = image_width;
impl.bin_size = bin_size;
impl.support_angle_cos = support_angle_cos;
impl.FSize = Impl::computeFSize(image_width);
impl.output = output;
const int total = (int)indices.size();
const int max_grid_dim = 65535;
const int smem_size = 2 * Impl::computeFSize(image_width) * sizeof(float);
dim3 block(Impl::CTA_SIZE);
dim3 grid(min(total, max_grid_dim), divUp(total, max_grid_dim));
hipLaunchKernelGGL(( computeSpinKernel<Impl>), dim3(grid), dim3(block), smem_size, 0, impl);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
template<bool radial, bool angular>
void computeSpinImagesOriginNormalEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
SpinImpl<radial, angular, UseOriginNormal> si;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
template<bool radial, bool angular>
void computeSpinImagesCustomAxesEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output)
{
SpinImpl<radial, angular, UseCustomAxis> si;
si.rotation_axis = rotation_axis;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
template<bool radial, bool angular>
void computeSpinImagesCustomAxesCloudEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output)
{
SpinImpl<radial, angular, UseCustomAxesCloud> si;
si.rotation_axes_cloud = rotation_axes_cloud;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
}
}
void pcl::device::computeSpinImagesOrigigNormal(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
typedef void (*originNormal)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int , int , float, PtrStep<float>);
const originNormal table[2][2] =
{
{ computeSpinImagesOriginNormalEx<false, false>, computeSpinImagesOriginNormalEx<false, true> },
{ computeSpinImagesOriginNormalEx<true, false>, computeSpinImagesOriginNormalEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
void pcl::device::computeSpinImagesCustomAxes(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output)
{
typedef void (*customAxes)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const float3&, PtrStep<float>);
const customAxes table[2][2] =
{
{ computeSpinImagesCustomAxesEx<false, false>, computeSpinImagesCustomAxesEx<false, true> },
{ computeSpinImagesCustomAxesEx<true, false>, computeSpinImagesCustomAxesEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axis, output);
}
void pcl::device::computeSpinImagesCustomAxesCloud(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output)
{
typedef void (*customAxesCloud)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const Normals&, PtrStep<float>);
const customAxesCloud table[2][2] =
{
{ computeSpinImagesCustomAxesCloudEx<false, false>, computeSpinImagesCustomAxesCloudEx<false, true> },
{ computeSpinImagesCustomAxesCloudEx<true, false>, computeSpinImagesCustomAxesCloudEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axes_cloud, output);
}
namespace pcl
{
namespace device
{
struct GtThan
{
int val;
GtThan(int value) : val(value) {}
__device__ __forceinline__ unsigned char operator()(int size) const { return size > val ? 1 : 0; }
};
}
}
void pcl::device::computeMask(const NeighborIndices& neighbours, int min_neighb, DeviceArray<unsigned char>& mask)
{
thrust::device_ptr<int> beg((int*)neighbours.sizes.ptr());
thrust::device_ptr<int> end = beg + neighbours.sizes.size();
thrust::device_ptr<unsigned char> out(mask.ptr());
thrust::transform(beg, end, out, GtThan(min_neighb));
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
| 519bd9928fb1891ba465314afa2288310134cd9b.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "pcl/gpu/utils/device/block.hpp"
#include "pcl/gpu/utils/device/limits.hpp"
#include "pcl/gpu/utils/device/vector_math.hpp"
#include "pcl/gpu/utils/device/functional.hpp"
#include "pcl/gpu/utils/safe_call.hpp"
#include "thrust/transform.h"
#include "thrust/device_ptr.h"
namespace pcl
{
namespace device
{
//[spinimage][angles] = [0..FSize][..FSize]
extern __shared__ float simage_angles[];
template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return *(float3*)&ptr[index]; }
//template<class It> __device__ __forceinline__ float3 fetch(It ptr, int index) { return tr(ptr[index]); }
struct UseCustomAxis
{
float3 rotation_axis;
__device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& /*normal*/) const { return rotation_axis; }
};
struct UseCustomAxesCloud
{
const NormalType* rotation_axes_cloud;
__device__ __forceinline__ float3 getRotationAxes(int index, const float3& /*normal*/) const { return fetch(rotation_axes_cloud, index); }
};
struct UseOriginNormal
{
__device__ __forceinline__ float3 getRotationAxes(int /*index*/, const float3& normal) const { return normal; }
};
struct Div12eps
{
__device__ __forceinline__ float operator()(float v1, float v2) const { return (float)(v1 / ( v2 + numeric_limits<double>::epsilon() )); }
};
struct DivValIfNonZero
{
float val;
__device__ __forceinline__ DivValIfNonZero(float value) : val(value) {}
__device__ __forceinline__ float operator()(float v) const { return val == 0 ? v : v/val; }
};
template<bool radial, bool angular, typename AxesStrategy>
struct SpinImpl : public AxesStrategy
{
enum
{
CTA_SIZE = 192
};
int work_size;
const int* indices;
const PointType* input_cloud;
const NormalType* input_normals;
const PointType* surface;
const NormalType* normals;
PtrStep<int> neighbor_indices;
const int* neighbor_indices_sizes;
float support_angle_cos;
int min_neighb;
int image_width;
float bin_size;
int FSize;
mutable PtrStep<float> output;
static __device__ __host__ __forceinline__ int computeFSize(int image_width)
{
int cols = 1 + image_width * 2;
int rows = 1 + image_width;
return cols * rows;
}
__device__ __forceinline__ void operator()() const
{
int i_input = blockIdx.x + gridDim.x * blockIdx.y;
int index = indices[i_input];
int neighb_count = neighbor_indices_sizes[i_input];
const int *ginds = neighbor_indices.ptr (i_input);
if (neighb_count < min_neighb)
return;
//set zeros to spin image
Block::fill(simage_angles, simage_angles + FSize, 0.f);
if (angular) //set zeros to angles
Block::fill(simage_angles + FSize, simage_angles + FSize + FSize, 0.f);
__syncthreads();
float3 origin_point = fetch(input_cloud, index);
float3 origin_normal = input_normals ? fetch(input_normals, index) : make_float3(0.f, 0.f, 0.f);
origin_normal = normalized_safe(origin_normal); //normalize if non-zero
float3 rotation_axis = AxesStrategy::getRotationAxes(index, origin_normal);
rotation_axis = normalized_safe(rotation_axis); //normalize if non-zero
const float eps = numeric_limits<float>::epsilon ();
for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE)
{
int neighb_index = ginds[i_neighb];
// first, skip the points with distant normals
float cos_between_normals = -2.f;
if (angular || support_angle_cos > 0.f) // not bogus
{
float3 normal = normalized(fetch(normals, neighb_index));
cos_between_normals = dot(origin_normal, normal);
cos_between_normals = fmax (-1.f, fmin (1.f, cos_between_normals));
if (fabs(cos_between_normals) < support_angle_cos) // allow counter-directed normals
continue;
cos_between_normals = fabs(cos_between_normals); // the normal is not used explicitly from now
}
// now compute the coordinate in cylindric coordinate system associated with the origin point
float3 direction = fetch(surface, neighb_index) - origin_point;
float direction_norm = norm (direction);
// ignore the point itself; it does not contribute really
if (direction_norm < 10 * eps)
continue;
// the angle between the normal vector and the direction to the point
float cos_dir_axis = dot(direction, rotation_axis) / direction_norm;
cos_dir_axis = fmax(-1.f, fmin(1.f, cos_dir_axis));
// compute coordinates w.r.t. the reference frame
float beta = numeric_limits<float>::quiet_NaN();
float alpha = numeric_limits<float>::quiet_NaN();
if (radial) // radial spin image structure
{
beta = asinf(cos_dir_axis); // yes, arc sine! to get the angle against tangent, not normal!
alpha = direction_norm;
}
else // rectangular spin-image structure
{
beta = direction_norm * cos_dir_axis;
alpha = direction_norm * sqrt (1.0 - cos_dir_axis*cos_dir_axis);
if (fabs (beta) >= bin_size * image_width || alpha >= bin_size * image_width)
continue; // outside the cylinder
}
// bilinear interpolation
float beta_bin_size = radial ? (PI*0.5f/image_width) : bin_size;
int beta_bin = floorf(beta / beta_bin_size) + image_width;
int alpha_bin = floorf(alpha / bin_size);
//alpha_bin = min(simage_cols, max(0, alpha_bin));
//beta_bin = min(simage_rows, max(0, beta_bin));
if (alpha_bin == image_width) // border points
{
alpha_bin--;
// HACK: to prevent a > 1
alpha = bin_size * (alpha_bin + 1) - eps;
}
if (beta_bin == 2*image_width ) // border points
{
beta_bin--;
// HACK: to prevent b > 1
beta = beta_bin_size * (beta_bin - image_width + 1) - eps;
}
float a = alpha/bin_size - alpha_bin;
float b = beta/beta_bin_size - float(beta_bin-image_width);
incSpinI(alpha_bin, beta_bin, (1-a) * (1-b));
incSpinI(alpha_bin+1, beta_bin, a * (1-b));
incSpinI(alpha_bin, beta_bin+1, (1-a) * b );
incSpinI(alpha_bin+1, beta_bin+1, a * b );
if (angular)
{
            float angle_between_normals = acos(cos_between_normals);
            incAngle(alpha_bin,   beta_bin,   angle_between_normals * (1-a) * (1-b));
            incAngle(alpha_bin+1, beta_bin,   angle_between_normals *    a  * (1-b));
            incAngle(alpha_bin,   beta_bin+1, angle_between_normals * (1-a) *    b );
            incAngle(alpha_bin+1, beta_bin+1, angle_between_normals *    a  *    b );
}
} /* for(int i_neighb = threadIdx.x; i_neighb < neighb_count; i_neighb += CTA_SIZE) */
__syncthreads();
if (angular)
{
          //transform sum to average by dividing angle/spinimage element-wise
          const float *angles_beg = simage_angles + FSize;
          const float *angles_end = angles_beg + FSize;
          const float *images_beg = simage_angles;
          Block::transfrom(angles_beg, angles_end, images_beg, output.ptr(i_input), Div12eps());
          ////Block::copy(angles_beg, angles_end, output.ptr(i_input));
          //Block::copy(images_beg, images_beg + FSize, output.ptr(i_input));
}
else
{
// copy to compute sum
Block::copy(simage_angles, simage_angles + FSize, simage_angles + FSize);
__syncthreads();
//compute sum
Block::reduce_n(simage_angles + FSize, FSize, pcl::device::plus<float>());
__syncthreads();
float sum = simage_angles[FSize];
Block::transfrom(simage_angles, simage_angles + FSize, output.ptr(i_input), DivValIfNonZero(sum));
}
}
__device__ __forceinline__ void incSpinI(int y, int x, float value) const { atomicAdd(simage_angles + y * (2*image_width + 1) + x, value); }
__device__ __forceinline__ void incAngle(int y, int x, float value) const { atomicAdd(simage_angles+FSize + y * (2*image_width + 1) + x, value); }
};
template<typename Impl>
__global__ void computeSpinKernel(const Impl impl) { impl(); }
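    // Fills in the functor, sizes the grid as one block per query point (spilling into
    // grid.y when the point count exceeds the 65535-block limit) and launches with enough
    // dynamic shared memory for the spin image plus the angle accumulator.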
template<typename Impl>
inline void computeSpinImages_caller(Impl& impl, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
impl.work_size = (int)indices.size();
impl.indices = indices;
impl.input_cloud = input_cloud;
impl.input_normals = input_normals;
impl.surface = surface;
impl.normals = normals;
impl.neighbor_indices = neighbours;
impl.neighbor_indices_sizes = neighbours.sizes;
impl.min_neighb = min_neighb;
impl.image_width = image_width;
impl.bin_size = bin_size;
impl.support_angle_cos = support_angle_cos;
impl.FSize = Impl::computeFSize(image_width);
impl.output = output;
const int total = (int)indices.size();
const int max_grid_dim = 65535;
const int smem_size = 2 * Impl::computeFSize(image_width) * sizeof(float);
dim3 block(Impl::CTA_SIZE);
dim3 grid(min(total, max_grid_dim), divUp(total, max_grid_dim));
computeSpinKernel<Impl><<<grid, block, smem_size>>>(impl);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template<bool radial, bool angular>
void computeSpinImagesOriginNormalEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
SpinImpl<radial, angular, UseOriginNormal> si;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
template<bool radial, bool angular>
void computeSpinImagesCustomAxesEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output)
{
SpinImpl<radial, angular, UseCustomAxis> si;
si.rotation_axis = rotation_axis;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
template<bool radial, bool angular>
void computeSpinImagesCustomAxesCloudEx(float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours,
int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output)
{
SpinImpl<radial, angular, UseCustomAxesCloud> si;
si.rotation_axes_cloud = rotation_axes_cloud;
computeSpinImages_caller(si, support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
}
}
void pcl::device::computeSpinImagesOrigigNormal(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, PtrStep<float> output)
{
typedef void (*originNormal)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int , int , float, PtrStep<float>);
const originNormal table[2][2] =
{
{ computeSpinImagesOriginNormalEx<false, false>, computeSpinImagesOriginNormalEx<false, true> },
{ computeSpinImagesOriginNormalEx<true, false>, computeSpinImagesOriginNormalEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, output);
}
void pcl::device::computeSpinImagesCustomAxes(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const float3& rotation_axis, PtrStep<float> output)
{
typedef void (*customAxes)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const float3&, PtrStep<float>);
const customAxes table[2][2] =
{
{ computeSpinImagesCustomAxesEx<false, false>, computeSpinImagesCustomAxesEx<false, true> },
{ computeSpinImagesCustomAxesEx<true, false>, computeSpinImagesCustomAxesEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axis, output);
}
void pcl::device::computeSpinImagesCustomAxesCloud(bool radial, bool angular, float support_angle_cos, const Indices& indices, const PointCloud& input_cloud, const Normals& input_normals,
const PointCloud& surface, const Normals& normals, const NeighborIndices& neighbours, int min_neighb, int image_width, float bin_size, const Normals& rotation_axes_cloud, PtrStep<float> output)
{
typedef void (*customAxesCloud)(float, const Indices&, const PointCloud&, const Normals&, const PointCloud&, const Normals&, const NeighborIndices&, int, int, float, const Normals&, PtrStep<float>);
const customAxesCloud table[2][2] =
{
{ computeSpinImagesCustomAxesCloudEx<false, false>, computeSpinImagesCustomAxesCloudEx<false, true> },
{ computeSpinImagesCustomAxesCloudEx<true, false>, computeSpinImagesCustomAxesCloudEx<true, true> }
};
table[(int)radial][(int)angular](support_angle_cos, indices, input_cloud, input_normals, surface, normals, neighbours, min_neighb, image_width, bin_size, rotation_axes_cloud, output);
};
namespace pcl
{
namespace device
{
struct GtThan
{
int val;
GtThan(int value) : val(value) {}
__device__ __forceinline__ unsigned char operator()(int size) const { return size > val ? 1 : 0; }
};
}
}
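// Builds a per-point mask: 1 where the point has more than min_neighb neighbours, 0 otherwise.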
void pcl::device::computeMask(const NeighborIndices& neighbours, int min_neighb, DeviceArray<unsigned char>& mask)
{
thrust::device_ptr<int> beg((int*)neighbours.sizes.ptr());
thrust::device_ptr<int> end = beg + neighbours.sizes.size();
thrust::device_ptr<unsigned char> out(mask.ptr());
thrust::transform(beg, end, out, GtThan(min_neighb));
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
|
e8fd369680a42a507fb65c65e28deefe7c2f5356.hip | // !!! This is a file automatically generated by hipify!!!
#include "virtual_memory.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "stdlib.h"
#include "stdio.h"
// extern int pagefault_num;
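// Mark every page-table entry invalid (-1) and pre-assign frame ownership to the four
// threads in round-robin order (entries [1024..2047] hold the owning thread id).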
__device__ void init_invert_page_table(VirtualMemory *vm) {
for (int i = 0; i < 1024; i++) {
vm->invert_page_table[i] = -1; // invalid := MSB is 1
vm->invert_page_table[i + 1024] = i%4;
}
}
__device__ void vm_init(VirtualMemory *vm, uchar *buffer, uchar *storage,
int *invert_page_table, int *pagefault_num_ptr,
int PAGESIZE, int INVERT_PAGE_TABLE_SIZE,
int PHYSICAL_MEM_SIZE, int STORAGE_SIZE,
int PAGE_ENTRIES, int current_thread) {
// init variables
vm->buffer = buffer;
vm->storage = storage;
vm->invert_page_table = invert_page_table;
vm->pagefault_num_ptr = pagefault_num_ptr;
vm->current_thread = current_thread;
struct Page_node *head;
struct Page_node *current;
	head = (struct Page_node *) malloc(sizeof(struct Page_node));
head->nxt = NULL;
//hipMalloc((void **)head, 100);
current = head;
for (int i = 0; i < 1024; i++) {
struct Page_node *temp;
//hipMalloc((void **)temp, 100);
		temp = (struct Page_node *) malloc(sizeof(struct Page_node));
temp->page_idx = i;
temp->nxt = NULL;
current->nxt = temp;
current = temp;
current->nxt = NULL;
//free(temp);
}
vm->LRU_head = head;
// init constants
vm->PAGESIZE = PAGESIZE;
vm->INVERT_PAGE_TABLE_SIZE = INVERT_PAGE_TABLE_SIZE;
vm->PHYSICAL_MEM_SIZE = PHYSICAL_MEM_SIZE;
vm->STORAGE_SIZE = STORAGE_SIZE;
vm->PAGE_ENTRIES = PAGE_ENTRIES;
// before first vm_write or vm_read
init_invert_page_table(vm);
}
__device__ int get_LRUidx(VirtualMemory *vm) { // get the least used index, which is a logical/disk memory address
return vm->LRU_head->nxt->page_idx;
}
__device__ int search_pageidx(VirtualMemory *vm, int page_num) {
for (int i = 0; i < 1024; i++) {
if (vm->invert_page_table[i] == page_num) return i;
}
return -1;
}
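// Move the node for frame `idx` to the tail of the LRU list; the node right after
// LRU_head is therefore always the least recently used frame.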
__device__ void update_stack(VirtualMemory *vm, int idx) {
struct Page_node *current = vm->LRU_head;
while (current->nxt->page_idx != idx) current = current->nxt;
struct Page_node *target = current->nxt;
current->nxt = target->nxt;
while (current->nxt != NULL) {
current = current->nxt;
}
current->nxt = target;
target->nxt = NULL;
}
__device__ void vm_write(VirtualMemory *vm, u32 addr, uchar value, int thread_id) {
/* Complete vm_write function to write value into data buffer */
int page_num = addr / 32; // addr is the address of disk/logical memory, and page_num is the corresponding page number
// printf("address %d\n", addr);
int search_result = search_pageidx(vm, page_num);
if (search_result == -1 || vm->invert_page_table[search_result+1024] != thread_id) { // not found or not this thread
printf("write page fault %d with thread %d\n",page_num, thread_id);
(*(vm->pagefault_num_ptr))++;
int LRU_idx = get_LRUidx(vm); // LRU index is the index of page table instead of the address of logical memory/disk
int disk_addr = vm->invert_page_table[LRU_idx];
if (disk_addr != -1) {
for (int i = 0; i < 32; i++) { // swap out
vm->storage[disk_addr * 32 + i] = vm->buffer[LRU_idx * 32 + i];
}
}
// update page table
vm->invert_page_table[LRU_idx] = page_num;
vm->invert_page_table[LRU_idx + 1024] = thread_id;
for (int i = 0; i < 32; i++) { // swap in
vm->buffer[LRU_idx * 32 + i] = vm->storage[page_num * 32 + i];
}
}
vm->buffer[search_pageidx(vm, page_num) * 32 + addr % 32] = value; // write into main memory
update_stack(vm, search_pageidx(vm, page_num));
return;
}
__device__ uchar vm_read(VirtualMemory *vm, u32 addr, int thread_id) {
  /* Complete vm_read function to read a single element from the data buffer */
int page_num = addr / 32; // addr is the address of disk/logical memory, and page_num is the corresponding page number
int search_result = search_pageidx(vm, page_num);
if (search_result == -1 || vm->invert_page_table[search_result + 1024] != thread_id) { // not found or not this thread
printf("read page fault %d with thread %d\n", page_num, thread_id);
(*(vm->pagefault_num_ptr))++;
int LRU_idx = get_LRUidx(vm); // LRU index is the index of page table instead of the address of logical memory/disk
int disk_addr = vm->invert_page_table[LRU_idx];
if (disk_addr != -1) {
for (int i = 0; i < 32; i++) { // swap out
vm->storage[disk_addr * 32 + i] = vm->buffer[LRU_idx * 32 + i];
}
}
// update page table
vm->invert_page_table[LRU_idx] = page_num;
vm->invert_page_table[LRU_idx + 1024] = thread_id;
for (int i = 0; i < 32; i++) { // swap in
vm->buffer[LRU_idx * 32 + i] = vm->storage[page_num * 32 + i];
}
}
uchar content = vm->buffer[search_pageidx(vm, page_num) * 32 + addr % 32]; // read character
update_stack(vm, search_pageidx(vm, page_num));
return content;
}
__device__ void vm_snapshot(VirtualMemory *vm, uchar *results, int offset, int input_size, int thread_id) {
  /* Complete snapshot function together with vm_read to load elements from data to the result buffer */
  for (int i = 0; i < input_size; i++) {
int value = vm_read(vm,i, thread_id);
results[i+offset] = value;
}
}
| e8fd369680a42a507fb65c65e28deefe7c2f5356.cu | #include "virtual_memory.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "stdlib.h"
#include "stdio.h"
// extern int pagefault_num;
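// Mark every page-table entry invalid (-1) and pre-assign frame ownership to the four
// threads in round-robin order (entries [1024..2047] hold the owning thread id).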
__device__ void init_invert_page_table(VirtualMemory *vm) {
for (int i = 0; i < 1024; i++) {
vm->invert_page_table[i] = -1; // invalid := MSB is 1
vm->invert_page_table[i + 1024] = i%4;
}
}
__device__ void vm_init(VirtualMemory *vm, uchar *buffer, uchar *storage,
int *invert_page_table, int *pagefault_num_ptr,
int PAGESIZE, int INVERT_PAGE_TABLE_SIZE,
int PHYSICAL_MEM_SIZE, int STORAGE_SIZE,
int PAGE_ENTRIES, int current_thread) {
// init variables
vm->buffer = buffer;
vm->storage = storage;
vm->invert_page_table = invert_page_table;
vm->pagefault_num_ptr = pagefault_num_ptr;
vm->current_thread = current_thread;
struct Page_node *head;
struct Page_node *current;
	head = (struct Page_node *) malloc(sizeof(struct Page_node));
head->nxt = NULL;
//cudaMalloc((void **)head, 100);
current = head;
for (int i = 0; i < 1024; i++) {
struct Page_node *temp;
//cudaMalloc((void **)temp, 100);
		temp = (struct Page_node *) malloc(sizeof(struct Page_node));
temp->page_idx = i;
temp->nxt = NULL;
current->nxt = temp;
current = temp;
current->nxt = NULL;
//free(temp);
}
vm->LRU_head = head;
// init constants
vm->PAGESIZE = PAGESIZE;
vm->INVERT_PAGE_TABLE_SIZE = INVERT_PAGE_TABLE_SIZE;
vm->PHYSICAL_MEM_SIZE = PHYSICAL_MEM_SIZE;
vm->STORAGE_SIZE = STORAGE_SIZE;
vm->PAGE_ENTRIES = PAGE_ENTRIES;
// before first vm_write or vm_read
init_invert_page_table(vm);
}
__device__ int get_LRUidx(VirtualMemory *vm) { // get the least used index, which is a logical/disk memory address
return vm->LRU_head->nxt->page_idx;
}
__device__ int search_pageidx(VirtualMemory *vm, int page_num) {
for (int i = 0; i < 1024; i++) {
if (vm->invert_page_table[i] == page_num) return i;
}
return -1;
}
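// Move the node for frame `idx` to the tail of the LRU list; the node right after
// LRU_head is therefore always the least recently used frame.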
__device__ void update_stack(VirtualMemory *vm, int idx) {
struct Page_node *current = vm->LRU_head;
while (current->nxt->page_idx != idx) current = current->nxt;
struct Page_node *target = current->nxt;
current->nxt = target->nxt;
while (current->nxt != NULL) {
current = current->nxt;
}
current->nxt = target;
target->nxt = NULL;
}
__device__ void vm_write(VirtualMemory *vm, u32 addr, uchar value, int thread_id) {
/* Complete vm_write function to write value into data buffer */
int page_num = addr / 32; // addr is the address of disk/logical memory, and page_num is the corresponding page number
// printf("address %d\n", addr);
int search_result = search_pageidx(vm, page_num);
if (search_result == -1 || vm->invert_page_table[search_result+1024] != thread_id) { // not found or not this thread
printf("write page fault %d with thread %d\n",page_num, thread_id);
(*(vm->pagefault_num_ptr))++;
int LRU_idx = get_LRUidx(vm); // LRU index is the index of page table instead of the address of logical memory/disk
int disk_addr = vm->invert_page_table[LRU_idx];
if (disk_addr != -1) {
for (int i = 0; i < 32; i++) { // swap out
vm->storage[disk_addr * 32 + i] = vm->buffer[LRU_idx * 32 + i];
}
}
// update page table
vm->invert_page_table[LRU_idx] = page_num;
vm->invert_page_table[LRU_idx + 1024] = thread_id;
for (int i = 0; i < 32; i++) { // swap in
vm->buffer[LRU_idx * 32 + i] = vm->storage[page_num * 32 + i];
}
}
vm->buffer[search_pageidx(vm, page_num) * 32 + addr % 32] = value; // write into main memory
update_stack(vm, search_pageidx(vm, page_num));
return;
}
__device__ uchar vm_read(VirtualMemory *vm, u32 addr, int thread_id) {
  /* Complete vm_read function to read a single element from the data buffer */
int page_num = addr / 32; // addr is the address of disk/logical memory, and page_num is the corresponding page number
int search_result = search_pageidx(vm, page_num);
if (search_result == -1 || vm->invert_page_table[search_result + 1024] != thread_id) { // not found or not this thread
printf("read page fault %d with thread %d\n", page_num, thread_id);
(*(vm->pagefault_num_ptr))++;
int LRU_idx = get_LRUidx(vm); // LRU index is the index of page table instead of the address of logical memory/disk
int disk_addr = vm->invert_page_table[LRU_idx];
if (disk_addr != -1) {
for (int i = 0; i < 32; i++) { // swap out
vm->storage[disk_addr * 32 + i] = vm->buffer[LRU_idx * 32 + i];
}
}
// update page table
vm->invert_page_table[LRU_idx] = page_num;
vm->invert_page_table[LRU_idx + 1024] = thread_id;
for (int i = 0; i < 32; i++) { // swap in
vm->buffer[LRU_idx * 32 + i] = vm->storage[page_num * 32 + i];
}
}
uchar content = vm->buffer[search_pageidx(vm, page_num) * 32 + addr % 32]; // read character
update_stack(vm, search_pageidx(vm, page_num));
return content;
}
__device__ void vm_snapshot(VirtualMemory *vm, uchar *results, int offset, int input_size, int thread_id) {
  /* Complete snapshot function together with vm_read to load elements from data to the result buffer */
  for (int i = 0; i < input_size; i++) {
int value = vm_read(vm,i, thread_id);
results[i+offset] = value;
}
}
|
bfe08121305ceab27080494aa8dd67215e4816b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "../utils/common.h"
static size_t N = 1000;
static size_t iter = 200;
void init(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
p[i] = i;
}
}
void output(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
printf("index %zu: %d\n", i, p[i]);
}
}
int main(int argc, char *argv[]) {
#ifdef USE_MPI
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("MPI task %d/%d\n", rank, numtasks);
#endif
// Init device
hipDevice_t device;
hipCtx_t context;
hipModule_t moduleAdd, moduleSub;
hipFunction_t vecAdd, vecSub;
int device_id = 0;
if (argc > 1) {
device_id = atoi(argv[1]);
}
cu_init_device(device_id, device, context);
cu_load_module_function(moduleAdd, "vecAdd.cubin", vecAdd, "vecAdd");
cu_load_module_function(moduleSub, "vecSub.cubin", vecSub, "vecSub");
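  // Each OpenMP thread allocates its own device buffers in the shared context and
  // alternates between the vecAdd and vecSub kernels (funcs[i % 2]) inside GPU_TEST_FOR.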
#pragma omp parallel
{
int l[N], r[N], p[N];
hipDeviceptr_t dl, dr, dp;
init(l, N);
init(r, N);
size_t threads = 256;
size_t blocks = (N - 1) / threads + 1;
DRIVER_API_CALL(hipCtxSetCurrent(context));
hipFunction_t funcs[] = {vecAdd, vecSub};
    // finish the hipify conversion: the remaining cu* driver calls have direct HIP equivalents
    DRIVER_API_CALL(hipMalloc((void**)&dl, N * sizeof(int)));
    DRIVER_API_CALL(hipMalloc((void**)&dr, N * sizeof(int)));
    DRIVER_API_CALL(hipMalloc((void**)&dp, N * sizeof(int)));
    DRIVER_API_CALL(hipMemcpyHtoD(dl, l, N * sizeof(int)));
    DRIVER_API_CALL(hipMemcpyHtoD(dr, r, N * sizeof(int)));
void *args[5] = {
&dl, &dr, &dp, &N, &iter
};
GPU_TEST_FOR(DRIVER_API_CALL(hipModuleLaunchKernel(funcs[i % 2], blocks, 1, 1, threads, 1, 1, 0, 0, args, 0)));
    DRIVER_API_CALL(hipMemcpyDtoH(l, dl, N * sizeof(int)));
    DRIVER_API_CALL(hipMemcpyDtoH(r, dr, N * sizeof(int)));
    DRIVER_API_CALL(hipMemcpyDtoH(p, dp, N * sizeof(int)));
DRIVER_API_CALL(hipFree(dl));
DRIVER_API_CALL(hipFree(dr));
DRIVER_API_CALL(hipFree(dp));
#ifdef OUTPUT
#pragma omp critical
{
printf("Thread %d\n", omp_get_thread_num());
output(p, N);
}
#endif
DRIVER_API_CALL(hipCtxSynchronize());
}
DRIVER_API_CALL(hipModuleUnload(moduleAdd));
DRIVER_API_CALL(hipModuleUnload(moduleSub));
DRIVER_API_CALL(hipCtxDestroy(context));
RUNTIME_API_CALL(hipDeviceSynchronize());
#ifdef USE_MPI
MPI_Finalize();
#endif
return 0;
}
| bfe08121305ceab27080494aa8dd67215e4816b4.cu | #include <cstdio>
#include <omp.h>
#include <cuda.h>
#include <cuda_runtime.h>
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "../utils/common.h"
static size_t N = 1000;
static size_t iter = 200;
void init(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
p[i] = i;
}
}
void output(int *p, size_t size) {
for (size_t i = 0; i < size; ++i) {
printf("index %zu: %d\n", i, p[i]);
}
}
int main(int argc, char *argv[]) {
#ifdef USE_MPI
int numtasks, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
printf("MPI task %d/%d\n", rank, numtasks);
#endif
// Init device
CUdevice device;
CUcontext context;
CUmodule moduleAdd, moduleSub;
CUfunction vecAdd, vecSub;
int device_id = 0;
if (argc > 1) {
device_id = atoi(argv[1]);
}
cu_init_device(device_id, device, context);
cu_load_module_function(moduleAdd, "vecAdd.cubin", vecAdd, "vecAdd");
cu_load_module_function(moduleSub, "vecSub.cubin", vecSub, "vecSub");
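  // Each OpenMP thread allocates its own device buffers in the shared context and
  // alternates between the vecAdd and vecSub kernels (funcs[i % 2]) inside GPU_TEST_FOR.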
#pragma omp parallel
{
int l[N], r[N], p[N];
CUdeviceptr dl, dr, dp;
init(l, N);
init(r, N);
size_t threads = 256;
size_t blocks = (N - 1) / threads + 1;
DRIVER_API_CALL(cuCtxSetCurrent(context));
CUfunction funcs[] = {vecAdd, vecSub};
DRIVER_API_CALL(cuMemAlloc(&dl, N * sizeof(int)));
DRIVER_API_CALL(cuMemAlloc(&dr, N * sizeof(int)));
DRIVER_API_CALL(cuMemAlloc(&dp, N * sizeof(int)));
DRIVER_API_CALL(cuMemcpyHtoD(dl, l, N * sizeof(int)));
DRIVER_API_CALL(cuMemcpyHtoD(dr, r, N * sizeof(int)));
void *args[5] = {
&dl, &dr, &dp, &N, &iter
};
GPU_TEST_FOR(DRIVER_API_CALL(cuLaunchKernel(funcs[i % 2], blocks, 1, 1, threads, 1, 1, 0, 0, args, 0)));
DRIVER_API_CALL(cuMemcpyDtoH(l, dl, N * sizeof(int)));
DRIVER_API_CALL(cuMemcpyDtoH(r, dr, N * sizeof(int)));
DRIVER_API_CALL(cuMemcpyDtoH(p, dp, N * sizeof(int)));
DRIVER_API_CALL(cuMemFree(dl));
DRIVER_API_CALL(cuMemFree(dr));
DRIVER_API_CALL(cuMemFree(dp));
#ifdef OUTPUT
#pragma omp critical
{
printf("Thread %d\n", omp_get_thread_num());
output(p, N);
}
#endif
DRIVER_API_CALL(cuCtxSynchronize());
}
DRIVER_API_CALL(cuModuleUnload(moduleAdd));
DRIVER_API_CALL(cuModuleUnload(moduleSub));
DRIVER_API_CALL(cuCtxDestroy(context));
RUNTIME_API_CALL(cudaDeviceSynchronize());
#ifdef USE_MPI
MPI_Finalize();
#endif
return 0;
}
|
f564a6f442d1a0f29d45158eab454d0a9da27153.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <chrono>
#include <cstring>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include "tiffio.h"
#define THREAD_DIM 20
#define MAX_FILTER_DIM 7
// saves TIFF file from data in `raster`
void save_tiff(const char *fname, uint32 *raster, uint32 w, uint32 h) {
TIFF *tif = TIFFOpen(fname, "w");
if (! raster) {
throw std::runtime_error("Could not open output file");
}
TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, w);
TIFFSetField(tif, TIFFTAG_IMAGELENGTH, h);
TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 4);
TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE);
TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
TIFFWriteEncodedStrip(tif, 0, raster, w*h*4);
TIFFClose(tif);
}
// loads image data from `fname` (allocating dynamic memory)
// *w and *h are updated with the image dimensions
// raster is a matrix flattened into an array using row-major order
// every uint32 in the array is 4 bytes, encoding 8-bit packed ABGR
// A: transparency attribute (can be ignored)
// B: blue pixel
// G: green pixel
// R: red pixel
uint32 *load_tiff(const char *fname, uint32 *w, uint32 *h) {
TIFF *tif = TIFFOpen(fname, "r");
if (! tif) {
throw std::runtime_error("Could not open input file");
}
TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, w);
TIFFGetField(tif, TIFFTAG_IMAGELENGTH, h);
uint32 *raster = (uint32 *) _TIFFmalloc(*w * *h * sizeof (uint32));
if (! raster) {
TIFFClose(tif);
throw std::runtime_error("Memory allocation error");
}
if (! TIFFReadRGBAImageOriented(tif, *w, *h, raster, ORIENTATION_TOPLEFT, 0)) {
TIFFClose(tif);
throw std::runtime_error("Could not read raster from TIFF image");
}
TIFFClose(tif);
return raster;
}
// Clamp function able to be used on the GPU
__device__ void cudaClamp(float *val) {
if (*val < 0) *val = 0;
if (*val > 255) *val = 255;
}
void clamp(float *val) {
if (*val < 0) *val = 0;
if (*val > 255) *val = 255;
}
// Kernel for calculation of one pixel in the raster
__global__ void convolve(uint32 *raster, uint32 *copy,
int w, int h, const float *filter, int st, int d) {
// Calculate the row and column indices of the pixel to handle based on the block and thread
int row = st + (blockIdx.y * blockDim.y) + threadIdx.y;
int col = st + (blockIdx.x * blockDim.x) + threadIdx.x;
// Check to make sure we are in a valid part of the grid (blocks overflow off the grid)
if (row < h-st && col < w-st) {
// Shared memory that holds the pixels to be worked on by this thread block
__shared__ uint32 support[(THREAD_DIM+MAX_FILTER_DIM) * (THREAD_DIM+MAX_FILTER_DIM)];
int supportDim = THREAD_DIM+(st*2);
int supportRow = st+threadIdx.y;
int supportCol = st+threadIdx.x;
// Load in the pixels this thread is responsible for bringing into thread block shared memory
// from the GPU global memory
// General case (load in the pixel you want to work on)
support[supportRow*supportDim + supportCol] = copy[row*w + col];
// Left edge of block
if (threadIdx.x == 0) {
for (int k = -st; k < 0; k++) {
support[supportRow*supportDim + supportCol+k] = copy[row*w + col+k];
}
// Top left corner
if (threadIdx.y == 0) {
for (int i = -st; i <= 0; i++) {
for (int k = -st; k <= 0; k++) {
support[(supportRow+i)*supportDim + supportCol+k] = copy[(row+i)*w + col+k];
}
}
}
// Bottom Left corner
if (threadIdx.y == blockDim.y-1 || row == h-st-st) {
for (int i = 0; i <= st; i++) {
for (int k = -st; k < 0; k++) {
support[(supportRow+i)*supportDim + supportCol+k] = copy[(row+i)*w + col+k];
}
}
}
}
// Right edge of block
if (threadIdx.x == blockDim.x-1 || col == w-st-st) {
for (int k = 0; k <= st; k++) {
support[supportRow*supportDim + supportCol+k] = copy[row*w + col+k];
}
// Top right corner
if (threadIdx.y == 0) {
for (int i = -st; i <= 0; i++) {
for (int k = 0; k <= st; k++) {
support[(supportRow+i)*supportDim + supportCol+k] = copy[(row+i)*w + col+k];
}
}
}
// Bottom right corner
if (threadIdx.y == blockDim.y-1 || row == h-st-st) {
for (int i = 0; i <= st; i++) {
for (int k = 0; k <= st; k++) {
support[(supportRow+i)*supportDim + supportCol+k] = copy[(row+i)*w + col+k];
}
}
}
}
// Top edge of block
if (threadIdx.y == 0) {
for (int i = -st; i < 0; i++) {
support[(supportRow+i)*supportDim + supportCol] = copy[(row+i)*w + col];
}
}
// Bottom edge of block
if (threadIdx.y == blockDim.y-1 || row == h-st-st) {
for (int i = 0; i <= st; i++) {
support[(supportRow+i)*supportDim + supportCol] = copy[(row+i)*w + col];
}
}
// Done creating support (in shared memory) in parallel, syncing
__syncthreads();
// Accumulate RGB values
float sumR, sumG, sumB;
uint32 idx, pixel;
sumR = sumG = sumB = 0;
for (uint32 k = 0 ; k < d ; k ++) {
idx = (supportRow-st+k)*supportDim + (supportCol-st);
for (uint32 l = 0 ; l < d ; l++) {
pixel = support[idx++];
sumR += (filter[k*d + l] * TIFFGetR(pixel));
sumG += (filter[k*d + l] * TIFFGetG(pixel));
sumB += (filter[k*d + l] * TIFFGetB(pixel));
}
}
// Check that RGB channels to write to the raster are not <0 or >255
cudaClamp(&sumR);
cudaClamp(&sumG);
cudaClamp(&sumB);
// Write the ARGB channels to the pixel using bitwise shifts and ORing of bits
raster[row*w + col] = TIFFGetA(copy[row*w + col]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR);
}
}
void filter_image_seq(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len) {
// to get RGB values from a pixel, you can either use bitwise masks
// or rely on the following macros:
// TIFFGetR(raster[i]) red
// TIFFGetG(raster[i]) green
// TIFFGetB(raster[i]) blue
// TIFFGetA(raster[i]) this value should be ignored
//
// to modify RGB values from a pixel, you can use bitwise shifts or masks
// each pixel stores values in the order ABGR
//
// TODO: here you will filter the image in raster
//
uint32 *copy = new uint32[w*h];
std::memcpy(copy, raster, sizeof(uint32)*w*h);
uint32 d = (uint32) std::sqrt(f_len);
uint32 idx, pixel;
uint32 st = d / 2;
uint32 end_w = w - d/2;
uint32 end_h = h - d/2;
float sumR, sumG, sumB;
// applies filter
for (uint32 i = st ; i < end_h ; i++) {
for (uint32 j = st ; j < end_w ; j++) {
sumR = sumG = sumB = 0;
for (uint32 k = 0 ; k < d ; k ++) {
idx = (i-st+k)*w + (j-st);
for (uint32 l = 0 ; l < d ; l++) {
pixel = copy[idx++];
sumR += (filter[k*d + l] * TIFFGetR(pixel));
sumG += (filter[k*d + l] * TIFFGetG(pixel));
sumB += (filter[k*d + l] * TIFFGetB(pixel));
}
}
clamp(&sumR);
clamp(&sumG);
clamp(&sumB);
raster[i*w + j] = TIFFGetA(raster[i*w + j]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR);
}
}
delete [] copy;
}
void filter_image_par(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len) {
//
// TODO: here you will filter the image in raster using GPU threads
//
// to get RGB values from a pixel, you can either use bitwise masks
// or rely on the following macros:
// TIFFGetR(raster[i]) red
// TIFFGetG(raster[i]) green
// TIFFGetB(raster[i]) blue
// TIFFGetA(raster[i]) this value should be ignored
//
// to modify RGB values from a pixel, you can use bitwise shifts or masks
// each pixel stores values in the order ABGR
//
// TODO: here you will filter the image in raster
//
uint32 d = (uint32) std::sqrt(f_len);
uint32 st = d / 2;
uint32 end_w = w - d/2;
uint32 end_h = h - d/2;
// CUDA CODE---------------------------------------------------------------------------
// create pointers for the CUDA arrays
uint32 *dev_raster;
float *dev_filter;
uint32 *dev_copy;
// variable to check for CUDA errors
hipError_t status;
// choose GPU to run
status = hipSetDevice(0);
if (status != hipSuccess) std::cerr << "hipSetDevice failed!" << std::endl;
// allocate space for the arrays in the GPU
status = hipMalloc(&dev_raster, sizeof(uint32) * (w*h));
if (status != hipSuccess) std::cerr << "hipMalloc (in) failed!" << std::endl;
status = hipMalloc(&dev_copy, sizeof(uint32) * (w*h));
if (status != hipSuccess) std::cerr << "hipMalloc (in) failed!" << std::endl;
status = hipMalloc(&dev_filter, sizeof(float) * f_len);
if (status != hipSuccess) std::cerr << "hipMalloc (in) failed!" << std::endl;
// transfer data from CPU to GPU
	// raster holds uint32 pixels (same byte count as float, but clearer)
	status = hipMemcpy(dev_raster, raster, sizeof(uint32) * (w*h), hipMemcpyHostToDevice);
	if (status != hipSuccess) std::cerr << "hipMemcpy H2D failed!" << std::endl;
	status = hipMemcpy(dev_copy, raster, sizeof(uint32) * (w*h), hipMemcpyHostToDevice);
	if (status != hipSuccess) std::cerr << "hipMemcpy H2D failed!" << std::endl;
status = hipMemcpy(dev_filter, filter, sizeof(float) * (f_len), hipMemcpyHostToDevice);
if (status != hipSuccess) std::cerr << "hipMemcpy H2D failed!" << std::endl;
// Computes how many blocks will fit into the image with one thread per pixel
// Overflows past the end of the image rows and columns when the image dimensions (x&y) don't divide evenly by the block dimensions (x&y)
dim3 threadsPerBlock(THREAD_DIM, THREAD_DIM, 1);
dim3 numBlocks((int)::ceil((float)(end_w-st)/(float)threadsPerBlock.x),
(int)::ceil((float)(end_h-st)/(float)threadsPerBlock.y), 1);
// do the work in the GPU
hipLaunchKernelGGL(( convolve), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dev_raster, dev_copy, w, h, dev_filter, st, d);
// wait for the kernel to finish, and check for errors
status = hipDeviceSynchronize();
if (status != hipSuccess) std::cerr << "error code " << status << " returned after kernel!" << std::endl;
// transfer results from GPU to CPU
status = hipMemcpy(raster, dev_raster, sizeof(uint32) * (w*h), hipMemcpyDeviceToHost);
if (status != hipSuccess) std::cerr << "hipMemcpy D2H failed!" << std::endl;
	// free the memory allocated in the GPU
	hipFree(dev_raster);
	hipFree(dev_copy);
	hipFree(dev_filter);
}
float *load_filter(const char *fname, int *n) {
std::ifstream myfile(fname);
if (! myfile) {
throw std::runtime_error("Could not open filter file");
}
myfile >> *n;
float *filter = new float[*n];
for (int i = 0 ; i < *n ; i++) myfile >> filter[i];
myfile.close();
return filter;
}
int main(int argc, char* argv[]) {
if (argc != 5) {
std::cout << "Usage:\t./filter <in_fname> <out_fname> <filter_fname> <algo>" << std::endl;
std::cout << "<in_fname> path to the input image" << std::endl;
std::cout << "<out_fname> path to the output image" << std::endl;
std::cout << "<filter_fname> path to the filter file" << std::endl;
std::cout << "<algo> whether to use the sequential (seq) or parallel algorithm (par)" << std::endl;
return 0;
}
uint32 width, height;
// loads the filter
int f_len;
float *filter = load_filter(argv[3], &f_len);
// loads image bytes from file name supplied as a command line argument
// this function allocates memory dynamically
uint32 *image = load_tiff(argv[1], &width, &height);
// Make a malloc in the GPU to load the CUDA library and make sure that it is working properly
uint32 *dev_initCuda;
hipError_t status;
status = hipMalloc(&dev_initCuda, 1);
if (status != hipSuccess) std::cerr << "hipMalloc (in) failed!" << std::endl;
// measure time of the algorithm
auto start = std::chrono::high_resolution_clock::now();
if (! std::strcmp(argv[4], "seq")) {
// call the sequential implementation
filter_image_seq(image, width, height, filter, f_len);
} else if (! std::strcmp(argv[4], "par")) {
// TODO: call the parallel implementation
filter_image_par(image, width, height, filter, f_len);
}
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end - start;
std::cout << diff.count();
// save new file with filtered image
save_tiff(argv[2], image, width, height);
// frees memory allocated by load_filter and load_tiff
delete [] filter;
_TIFFfree(image);
return 0;
}
| f564a6f442d1a0f29d45158eab454d0a9da27153.cu | #include <cmath>
#include <chrono>
#include <cstring>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include "tiffio.h"
#define THREAD_DIM 20
#define MAX_FILTER_DIM 7
// saves TIFF file from data in `raster`
void save_tiff(const char *fname, uint32 *raster, uint32 w, uint32 h) {
TIFF *tif = TIFFOpen(fname, "w");
if (! raster) {
throw std::runtime_error("Could not open output file");
}
TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, w);
TIFFSetField(tif, TIFFTAG_IMAGELENGTH, h);
TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 4);
TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE);
TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
TIFFWriteEncodedStrip(tif, 0, raster, w*h*4);
TIFFClose(tif);
}
// loads image data from `fname` (allocating dynamic memory)
// *w and *h are updated with the image dimensions
// raster is a matrix flattened into an array using row-major order
// every uint32 in the array is 4 bytes, encoding 8-bit packed ABGR
// A: transparency attribute (can be ignored)
// B: blue pixel
// G: green pixel
// R: red pixel
uint32 *load_tiff(const char *fname, uint32 *w, uint32 *h) {
TIFF *tif = TIFFOpen(fname, "r");
if (! tif) {
throw std::runtime_error("Could not open input file");
}
TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, w);
TIFFGetField(tif, TIFFTAG_IMAGELENGTH, h);
uint32 *raster = (uint32 *) _TIFFmalloc(*w * *h * sizeof (uint32));
if (! raster) {
TIFFClose(tif);
throw std::runtime_error("Memory allocation error");
}
if (! TIFFReadRGBAImageOriented(tif, *w, *h, raster, ORIENTATION_TOPLEFT, 0)) {
TIFFClose(tif);
throw std::runtime_error("Could not read raster from TIFF image");
}
TIFFClose(tif);
return raster;
}
// Clamp function able to be used on the GPU
__device__ void cudaClamp(float *val) {
if (*val < 0) *val = 0;
if (*val > 255) *val = 255;
}
void clamp(float *val) {
if (*val < 0) *val = 0;
if (*val > 255) *val = 255;
}
// Kernel for calculation of one pixel in the raster
__global__ void convolve(uint32 *raster, uint32 *copy,
int w, int h, const float *filter, int st, int d) {
// Calculate the row and column indices of the pixel to handle based on the block and thread
int row = st + (blockIdx.y * blockDim.y) + threadIdx.y;
int col = st + (blockIdx.x * blockDim.x) + threadIdx.x;
// Check to make sure we are in a valid part of the grid (blocks overflow off the grid)
if (row < h-st && col < w-st) {
// Shared memory that holds the pixels to be worked on by this thread block
__shared__ uint32 support[(THREAD_DIM+MAX_FILTER_DIM) * (THREAD_DIM+MAX_FILTER_DIM)];
int supportDim = THREAD_DIM+(st*2);
int supportRow = st+threadIdx.y;
int supportCol = st+threadIdx.x;
// Load in the pixels this thread is responsible for bringing into thread block shared memory
// from the GPU global memory
// General case (load in the pixel you want to work on)
support[supportRow*supportDim + supportCol] = copy[row*w + col];
// Left edge of block
if (threadIdx.x == 0) {
for (int k = -st; k < 0; k++) {
support[supportRow*supportDim + supportCol+k] = copy[row*w + col+k];
}
// Top left corner
if (threadIdx.y == 0) {
for (int i = -st; i <= 0; i++) {
for (int k = -st; k <= 0; k++) {
support[(supportRow+i)*supportDim + supportCol+k] = copy[(row+i)*w + col+k];
}
}
}
// Bottom Left corner
if (threadIdx.y == blockDim.y-1 || row == h-st-st) {
for (int i = 0; i <= st; i++) {
for (int k = -st; k < 0; k++) {
support[(supportRow+i)*supportDim + supportCol+k] = copy[(row+i)*w + col+k];
}
}
}
}
// Right edge of block
if (threadIdx.x == blockDim.x-1 || col == w-st-st) {
for (int k = 0; k <= st; k++) {
support[supportRow*supportDim + supportCol+k] = copy[row*w + col+k];
}
// Top right corner
if (threadIdx.y == 0) {
for (int i = -st; i <= 0; i++) {
for (int k = 0; k <= st; k++) {
support[(supportRow+i)*supportDim + supportCol+k] = copy[(row+i)*w + col+k];
}
}
}
// Bottom right corner
if (threadIdx.y == blockDim.y-1 || row == h-st-st) {
for (int i = 0; i <= st; i++) {
for (int k = 0; k <= st; k++) {
support[(supportRow+i)*supportDim + supportCol+k] = copy[(row+i)*w + col+k];
}
}
}
}
// Top edge of block
if (threadIdx.y == 0) {
for (int i = -st; i < 0; i++) {
support[(supportRow+i)*supportDim + supportCol] = copy[(row+i)*w + col];
}
}
// Bottom edge of block
if (threadIdx.y == blockDim.y-1 || row == h-st-st) {
for (int i = 0; i <= st; i++) {
support[(supportRow+i)*supportDim + supportCol] = copy[(row+i)*w + col];
}
}
// Done creating support (in shared memory) in parallel, syncing
__syncthreads();
// Accumulate RGB values
float sumR, sumG, sumB;
uint32 idx, pixel;
sumR = sumG = sumB = 0;
for (uint32 k = 0 ; k < d ; k ++) {
idx = (supportRow-st+k)*supportDim + (supportCol-st);
for (uint32 l = 0 ; l < d ; l++) {
pixel = support[idx++];
sumR += (filter[k*d + l] * TIFFGetR(pixel));
sumG += (filter[k*d + l] * TIFFGetG(pixel));
sumB += (filter[k*d + l] * TIFFGetB(pixel));
}
}
// Check that RGB channels to write to the raster are not <0 or >255
cudaClamp(&sumR);
cudaClamp(&sumG);
cudaClamp(&sumB);
// Write the ARGB channels to the pixel using bitwise shifts and ORing of bits
raster[row*w + col] = TIFFGetA(copy[row*w + col]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR);
}
}
void filter_image_seq(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len) {
// to get RGB values from a pixel, you can either use bitwise masks
// or rely on the following macros:
// TIFFGetR(raster[i]) red
// TIFFGetG(raster[i]) green
// TIFFGetB(raster[i]) blue
// TIFFGetA(raster[i]) this value should be ignored
//
// to modify RGB values from a pixel, you can use bitwise shifts or masks
// each pixel stores values in the order ABGR
//
// TODO: here you will filter the image in raster
//
uint32 *copy = new uint32[w*h];
std::memcpy(copy, raster, sizeof(uint32)*w*h);
uint32 d = (uint32) std::sqrt(f_len);
uint32 idx, pixel;
uint32 st = d / 2;
uint32 end_w = w - d/2;
uint32 end_h = h - d/2;
float sumR, sumG, sumB;
// applies filter
for (uint32 i = st ; i < end_h ; i++) {
for (uint32 j = st ; j < end_w ; j++) {
sumR = sumG = sumB = 0;
for (uint32 k = 0 ; k < d ; k ++) {
idx = (i-st+k)*w + (j-st);
for (uint32 l = 0 ; l < d ; l++) {
pixel = copy[idx++];
sumR += (filter[k*d + l] * TIFFGetR(pixel));
sumG += (filter[k*d + l] * TIFFGetG(pixel));
sumB += (filter[k*d + l] * TIFFGetB(pixel));
}
}
clamp(&sumR);
clamp(&sumG);
clamp(&sumB);
raster[i*w + j] = TIFFGetA(raster[i*w + j]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR);
}
}
delete [] copy;
}
void filter_image_par(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len) {
//
// TODO: here you will filter the image in raster using GPU threads
//
// to get RGB values from a pixel, you can either use bitwise masks
// or rely on the following macros:
// TIFFGetR(raster[i]) red
// TIFFGetG(raster[i]) green
// TIFFGetB(raster[i]) blue
// TIFFGetA(raster[i]) this value should be ignored
//
// to modify RGB values from a pixel, you can use bitwise shifts or masks
// each pixel stores values in the order ABGR
//
// TODO: here you will filter the image in raster
//
uint32 d = (uint32) std::sqrt(f_len);
uint32 st = d / 2;
uint32 end_w = w - d/2;
uint32 end_h = h - d/2;
// CUDA CODE---------------------------------------------------------------------------
// create pointers for the CUDA arrays
uint32 *dev_raster;
float *dev_filter;
uint32 *dev_copy;
// variable to check for CUDA errors
cudaError_t status;
// choose GPU to run
status = cudaSetDevice(0);
if (status != cudaSuccess) std::cerr << "cudaSetDevice failed!" << std::endl;
// allocate space for the arrays in the GPU
status = cudaMalloc(&dev_raster, sizeof(uint32) * (w*h));
if (status != cudaSuccess) std::cerr << "cudaMalloc (in) failed!" << std::endl;
status = cudaMalloc(&dev_copy, sizeof(uint32) * (w*h));
if (status != cudaSuccess) std::cerr << "cudaMalloc (in) failed!" << std::endl;
status = cudaMalloc(&dev_filter, sizeof(float) * f_len);
if (status != cudaSuccess) std::cerr << "cudaMalloc (in) failed!" << std::endl;
// transfer data from CPU to GPU
	// raster holds uint32 pixels (same byte count as float, but clearer)
	status = cudaMemcpy(dev_raster, raster, sizeof(uint32) * (w*h), cudaMemcpyHostToDevice);
	if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed!" << std::endl;
	status = cudaMemcpy(dev_copy, raster, sizeof(uint32) * (w*h), cudaMemcpyHostToDevice);
	if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed!" << std::endl;
status = cudaMemcpy(dev_filter, filter, sizeof(float) * (f_len), cudaMemcpyHostToDevice);
if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed!" << std::endl;
// Computes how many blocks will fit into the image with one thread per pixel
// Overflows past the end of the image rows and columns when the image dimensions (x&y) don't divide evenly by the block dimensions (x&y)
dim3 threadsPerBlock(THREAD_DIM, THREAD_DIM, 1);
dim3 numBlocks((int)std::ceil((float)(end_w-st)/(float)threadsPerBlock.x),
(int)std::ceil((float)(end_h-st)/(float)threadsPerBlock.y), 1);
// do the work in the GPU
convolve<<<numBlocks, threadsPerBlock>>>(dev_raster, dev_copy, w, h, dev_filter, st, d);
// wait for the kernel to finish, and check for errors
	status = cudaDeviceSynchronize();  // cudaThreadSynchronize is deprecated
if (status != cudaSuccess) std::cerr << "error code " << status << " returned after kernel!" << std::endl;
// transfer results from GPU to CPU
status = cudaMemcpy(raster, dev_raster, sizeof(uint32) * (w*h), cudaMemcpyDeviceToHost);
if (status != cudaSuccess) std::cerr << "cudaMemcpy D2H failed!" << std::endl;
	// free the memory allocated in the GPU
	cudaFree(dev_raster);
	cudaFree(dev_copy);
	cudaFree(dev_filter);
}
float *load_filter(const char *fname, int *n) {
std::ifstream myfile(fname);
if (! myfile) {
throw std::runtime_error("Could not open filter file");
}
myfile >> *n;
float *filter = new float[*n];
for (int i = 0 ; i < *n ; i++) myfile >> filter[i];
myfile.close();
return filter;
}
int main(int argc, char* argv[]) {
if (argc != 5) {
std::cout << "Usage:\t./filter <in_fname> <out_fname> <filter_fname> <algo>" << std::endl;
std::cout << "<in_fname> path to the input image" << std::endl;
std::cout << "<out_fname> path to the output image" << std::endl;
std::cout << "<filter_fname> path to the filter file" << std::endl;
std::cout << "<algo> whether to use the sequential (seq) or parallel algorithm (par)" << std::endl;
return 0;
}
uint32 width, height;
// loads the filter
int f_len;
float *filter = load_filter(argv[3], &f_len);
// loads image bytes from file name supplied as a command line argument
// this function allocates memory dynamically
uint32 *image = load_tiff(argv[1], &width, &height);
// Make a malloc in the GPU to load the CUDA library and make sure that it is working properly
uint32 *dev_initCuda;
cudaError_t status;
status = cudaMalloc(&dev_initCuda, 1);
if (status != cudaSuccess) std::cerr << "cudaMalloc (in) failed!" << std::endl;
// measure time of the algorithm
auto start = std::chrono::high_resolution_clock::now();
if (! std::strcmp(argv[4], "seq")) {
// call the sequential implementation
filter_image_seq(image, width, height, filter, f_len);
} else if (! std::strcmp(argv[4], "par")) {
// TODO: call the parallel implementation
filter_image_par(image, width, height, filter, f_len);
}
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end - start;
std::cout << diff.count();
// save new file with filtered image
save_tiff(argv[2], image, width, height);
// frees memory allocated by load_filter and load_tiff
delete [] filter;
_TIFFfree(image);
return 0;
}
|
ba82e13b716c8d56deabf947e7cebe314e1b735a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/epilogue/epilogue_workspace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel computes accumulator data and stores it out
template <typename Epilogue>
__global__ void kernel_epilogue_workspace(typename Epilogue::Params params) {
__shared__ typename Epilogue::SharedStorage shared_storage;
int warp_id = threadIdx.y;
int lane_id = threadIdx.x;
Epilogue epilogue(params, shared_storage, warp_id, lane_id);
//
// Initialize accumulator tile
//
typename Epilogue::FragmentC accum;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Epilogue::FragmentC::kElements; ++i) {
    accum[i] = typename Epilogue::FragmentC::Element(warp_id * blockDim.x + lane_id);
}
//
// Efficient epilogue
//
cutlass::GemmCoord tb_tile_coord{blockIdx.x, blockIdx.y, 0};
cutlass::GemmCoord problem_size =
tb_tile_coord *
cutlass::GemmCoord{Epilogue::Shape::kM, Epilogue::Shape::kN, 1};
// Store accumulators
epilogue(
problem_size,
tb_tile_coord,
accum);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_epilogue_workspace, tensor_op_128x128_64x64) {
//
// Define an instance of the epilogue and see if it works
//
static int const kWarpCount = 4;
static int const kWarpSize = 32;
using Shape = cutlass::MatrixShape<128, 128>;
using FragmentC = cutlass::Array<int, Shape::kCount / (kWarpCount * kWarpSize)>;
using Epilogue = cutlass::gemm::threadblock::EpilogueWorkspace<
Shape,
kWarpCount,
FragmentC
>;
typename Epilogue::Params params(
);
// Launch the kernel
dim3 grid(1,1);
dim3 block(kWarpSize, kWarpCount);
hipLaunchKernelGGL(( test::gemm::threadblock::kernel_epilogue_workspace<Epilogue>), dim3(grid), dim3(block) , 0, 0,
params
);
hipError_t result = hipDeviceSynchronize();
EXPECT_EQ(result, hipSuccess) << "Kernel launch error - " << hipGetErrorString(result);
//
//
//
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| ba82e13b716c8d56deabf947e7cebe314e1b735a.cu | /***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Unit tests for thread-level GEMM
*/
#include "../../common/cutlass_unit_test.h"
#include "cutlass/epilogue/epilogue_workspace.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace test {
namespace gemm {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel computes accumulator data and stores it out
template <typename Epilogue>
__global__ void kernel_epilogue_workspace(typename Epilogue::Params params) {
__shared__ typename Epilogue::SharedStorage shared_storage;
int warp_id = threadIdx.y;
int lane_id = threadIdx.x;
Epilogue epilogue(params, shared_storage, warp_id, lane_id);
//
// Initialize accumulator tile
//
typename Epilogue::FragmentC accum;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Epilogue::FragmentC::kElements; ++i) {
    accum[i] = typename Epilogue::FragmentC::Element(warp_id * blockDim.x + lane_id);
}
//
// Efficient epilogue
//
cutlass::GemmCoord tb_tile_coord{blockIdx.x, blockIdx.y, 0};
cutlass::GemmCoord problem_size =
tb_tile_coord *
cutlass::GemmCoord{Epilogue::Shape::kM, Epilogue::Shape::kN, 1};
// Store accumulators
epilogue(
problem_size,
tb_tile_coord,
accum);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace gemm
} // namespace test
/////////////////////////////////////////////////////////////////////////////////////////////////
TEST(SM75_gemm_threadblock_epilogue_workspace, tensor_op_128x128_64x64) {
//
// Define an instance of the epilogue and see if it works
//
static int const kWarpCount = 4;
static int const kWarpSize = 32;
using Shape = cutlass::MatrixShape<128, 128>;
using FragmentC = cutlass::Array<int, Shape::kCount / (kWarpCount * kWarpSize)>;
using Epilogue = cutlass::gemm::threadblock::EpilogueWorkspace<
Shape,
kWarpCount,
FragmentC
>;
typename Epilogue::Params params(
);
// Launch the kernel
dim3 grid(1,1);
dim3 block(kWarpSize, kWarpCount);
test::gemm::threadblock::kernel_epilogue_workspace<Epilogue><<< grid, block >>>(
params
);
cudaError_t result = cudaDeviceSynchronize();
EXPECT_EQ(result, cudaSuccess) << "Kernel launch error - " << cudaGetErrorString(result);
//
//
//
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
52703a36826d76f72881c34d7b299dbc6b182c25.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "RBMCopyFilterKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *weightPtr = NULL;
hipMalloc(&weightPtr, XSIZE*YSIZE*sizeof(float)); // size in bytes, not elements
float *filterPtr = NULL;
hipMalloc(&filterPtr, XSIZE*YSIZE*sizeof(float));
int weightCount = 1;
int i = 1;
int thisLayerSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(RBMCopyFilterKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, weightPtr, filterPtr, weightCount, i, thisLayerSize);
hipDeviceSynchronize();
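// Warm-up launches; the timed loop below measures 1000 subsequent kernel launches.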
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(RBMCopyFilterKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, weightPtr, filterPtr, weightCount, i, thisLayerSize);
}
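// Timed region: 1000 launches; note there is no device synchronization before the end timestamp.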
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(RBMCopyFilterKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, weightPtr, filterPtr, weightCount, i, thisLayerSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 52703a36826d76f72881c34d7b299dbc6b182c25.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "RBMCopyFilterKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
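// Benchmark driver: for each requested matrix size and each block shape, time repeated launches of RBMCopyFilterKernel.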
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *weightPtr = NULL;
cudaMalloc(&weightPtr, XSIZE*YSIZE*sizeof(float));
float *filterPtr = NULL;
cudaMalloc(&filterPtr, XSIZE*YSIZE*sizeof(float));
int weightCount = 1;
int i = 1;
int thisLayerSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
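// Round the sizes up to multiples of the block dimensions so the grid covers the whole matrix.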
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
RBMCopyFilterKernel<<<gridBlock,threadBlock>>>(weightPtr,filterPtr,weightCount,i,thisLayerSize);
cudaDeviceSynchronize();
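// Warm-up launches (not included in the timing below).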
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
RBMCopyFilterKernel<<<gridBlock,threadBlock>>>(weightPtr,filterPtr,weightCount,i,thisLayerSize);
}
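// Timed region: 1000 launches; note there is no device synchronization before the end timestamp.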
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
RBMCopyFilterKernel<<<gridBlock,threadBlock>>>(weightPtr,filterPtr,weightCount,i,thisLayerSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0a9fd009dc47fad97f73f005debbbc0ff1a4b73d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <random>
#include "metrics/vMeasure.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct vMeasureParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
double beta;
bool sameArrays;
double tolerance;
};
//test fixture class
template <typename T>
class vMeasureTest : public ::testing::TestWithParam<vMeasureParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<vMeasureParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
//generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
//allocating and initializing memory to the GPU
CUDA_CHECK(hipStreamCreate(&stream));
MLCommon::allocate(truthClusterArray, nElements, true);
MLCommon::allocate(predClusterArray, nElements, true);
MLCommon::updateDevice(truthClusterArray, &arr1[0], (int)nElements, stream);
MLCommon::updateDevice(predClusterArray, &arr2[0], (int)nElements, stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new defaultDeviceAllocator);
//calculating the golden output
double truthHomogeity, truthCompleteness;
truthHomogeity = MLCommon::Metrics::homogeneityScore(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
truthCompleteness = MLCommon::Metrics::homogeneityScore(
predClusterArray, truthClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
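// Golden reference: completeness is homogeneity with the truth/prediction arrays swapped; v-measure is their beta-weighted harmonic mean.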
if (truthCompleteness + truthHomogeity == 0.0)
truthVMeasure = 0.0;
else
truthVMeasure = ((1 + params.beta) * truthHomogeity * truthCompleteness /
(params.beta * truthHomogeity + truthCompleteness));
//calling the vMeasure CUDA implementation
computedVMeasure = MLCommon::Metrics::vMeasure(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream, params.beta);
}
//the destructor
void TearDown() override {
CUDA_CHECK(hipFree(truthClusterArray));
CUDA_CHECK(hipFree(predClusterArray));
CUDA_CHECK(hipStreamDestroy(stream));
}
//declaring the data values
vMeasureParam params;
T lowerLabelRange, upperLabelRange;
T* truthClusterArray = nullptr;
T* predClusterArray = nullptr;
int nElements = 0;
double truthVMeasure = 0;
double computedVMeasure = 0;
hipStream_t stream;
};
//setting test parameter values
const std::vector<vMeasureParam> inputs = {
{199, 1, 10, 1.0, false, 0.000001}, {200, 15, 100, 1.0, false, 0.000001},
{100, 1, 20, 1.0, false, 0.000001}, {10, 1, 10, 1.0, false, 0.000001},
{198, 1, 100, 1.0, false, 0.000001}, {300, 3, 99, 1.0, false, 0.000001},
{199, 1, 10, 1.0, true, 0.000001}, {200, 15, 100, 1.0, true, 0.000001},
{100, 1, 20, 1.0, true, 0.000001}, {10, 1, 10, 1.0, true, 0.000001},
{198, 1, 100, 1.0, true, 0.000001}, {300, 3, 99, 1.0, true, 0.000001}};
//writing the test suite
typedef vMeasureTest<int> vMeasureTestClass;
TEST_P(vMeasureTestClass, Result) {
ASSERT_NEAR(computedVMeasure, truthVMeasure, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(vMeasure, vMeasureTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
| 0a9fd009dc47fad97f73f005debbbc0ff1a4b73d.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <random>
#include "metrics/vMeasure.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct vMeasureParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
double beta;
bool sameArrays;
double tolerance;
};
//test fixture class
template <typename T>
class vMeasureTest : public ::testing::TestWithParam<vMeasureParam> {
protected:
//the constructor
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<vMeasureParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
//generating random value test input
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
//allocating and initializing memory to the GPU
CUDA_CHECK(cudaStreamCreate(&stream));
MLCommon::allocate(truthClusterArray, nElements, true);
MLCommon::allocate(predClusterArray, nElements, true);
MLCommon::updateDevice(truthClusterArray, &arr1[0], (int)nElements, stream);
MLCommon::updateDevice(predClusterArray, &arr2[0], (int)nElements, stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new defaultDeviceAllocator);
//calculating the golden output
double truthHomogeity, truthCompleteness;
truthHomogeity = MLCommon::Metrics::homogeneityScore(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
truthCompleteness = MLCommon::Metrics::homogeneityScore(
predClusterArray, truthClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
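// Golden reference: completeness is homogeneity with the truth/prediction arrays swapped; v-measure is their beta-weighted harmonic mean.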
if (truthCompleteness + truthHomogeity == 0.0)
truthVMeasure = 0.0;
else
truthVMeasure = ((1 + params.beta) * truthHomogeity * truthCompleteness /
(params.beta * truthHomogeity + truthCompleteness));
//calling the vMeasure CUDA implementation
computedVMeasure = MLCommon::Metrics::vMeasure(
truthClusterArray, predClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream, params.beta);
}
//the destructor
void TearDown() override {
CUDA_CHECK(cudaFree(truthClusterArray));
CUDA_CHECK(cudaFree(predClusterArray));
CUDA_CHECK(cudaStreamDestroy(stream));
}
//declaring the data values
vMeasureParam params;
T lowerLabelRange, upperLabelRange;
T* truthClusterArray = nullptr;
T* predClusterArray = nullptr;
int nElements = 0;
double truthVMeasure = 0;
double computedVMeasure = 0;
cudaStream_t stream;
};
//setting test parameter values
const std::vector<vMeasureParam> inputs = {
{199, 1, 10, 1.0, false, 0.000001}, {200, 15, 100, 1.0, false, 0.000001},
{100, 1, 20, 1.0, false, 0.000001}, {10, 1, 10, 1.0, false, 0.000001},
{198, 1, 100, 1.0, false, 0.000001}, {300, 3, 99, 1.0, false, 0.000001},
{199, 1, 10, 1.0, true, 0.000001}, {200, 15, 100, 1.0, true, 0.000001},
{100, 1, 20, 1.0, true, 0.000001}, {10, 1, 10, 1.0, true, 0.000001},
{198, 1, 100, 1.0, true, 0.000001}, {300, 3, 99, 1.0, true, 0.000001}};
//writing the test suite
typedef vMeasureTest<int> vMeasureTestClass;
TEST_P(vMeasureTestClass, Result) {
ASSERT_NEAR(computedVMeasure, truthVMeasure, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(vMeasure, vMeasureTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
|